// SPDX-License-Identifier: GPL-2.0-only
/*
 * 3-axis accelerometer driver supporting many Bosch-Sensortec chips
 * Copyright (c) 2014, Intel Corporation.
 */

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#include "bmc150-accel.h"

#define BMC150_ACCEL_DRV_NAME			"bmc150_accel"
#define BMC150_ACCEL_IRQ_NAME			"bmc150_accel_event"

#define BMC150_ACCEL_REG_CHIP_ID		0x00

#define BMC150_ACCEL_REG_INT_STATUS_2		0x0B
#define BMC150_ACCEL_ANY_MOTION_MASK		0x07
#define BMC150_ACCEL_ANY_MOTION_BIT_X		BIT(0)
#define BMC150_ACCEL_ANY_MOTION_BIT_Y		BIT(1)
#define BMC150_ACCEL_ANY_MOTION_BIT_Z		BIT(2)
#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN	BIT(3)

#define BMC150_ACCEL_REG_PMU_LPW		0x11
#define BMC150_ACCEL_PMU_MODE_MASK		0xE0
#define BMC150_ACCEL_PMU_MODE_SHIFT		5
#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_MASK	0x17
#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT	1

#define BMC150_ACCEL_REG_PMU_RANGE		0x0F

#define BMC150_ACCEL_DEF_RANGE_2G		0x03
#define BMC150_ACCEL_DEF_RANGE_4G		0x05
#define BMC150_ACCEL_DEF_RANGE_8G		0x08
#define BMC150_ACCEL_DEF_RANGE_16G		0x0C

/* Default BW: 125Hz */
#define BMC150_ACCEL_REG_PMU_BW			0x10
#define BMC150_ACCEL_DEF_BW			125

#define BMC150_ACCEL_REG_RESET			0x14
#define BMC150_ACCEL_RESET_VAL			0xB6

#define BMC150_ACCEL_REG_INT_MAP_0		0x19
#define BMC150_ACCEL_INT_MAP_0_BIT_INT1_SLOPE	BIT(2)

#define BMC150_ACCEL_REG_INT_MAP_1		0x1A
#define BMC150_ACCEL_INT_MAP_1_BIT_INT1_DATA	BIT(0)
#define BMC150_ACCEL_INT_MAP_1_BIT_INT1_FWM	BIT(1)
#define BMC150_ACCEL_INT_MAP_1_BIT_INT1_FFULL	BIT(2)
#define BMC150_ACCEL_INT_MAP_1_BIT_INT2_FFULL	BIT(5)
#define BMC150_ACCEL_INT_MAP_1_BIT_INT2_FWM	BIT(6)
#define BMC150_ACCEL_INT_MAP_1_BIT_INT2_DATA	BIT(7)

#define BMC150_ACCEL_REG_INT_MAP_2		0x1B
#define BMC150_ACCEL_INT_MAP_2_BIT_INT2_SLOPE	BIT(2)

#define BMC150_ACCEL_REG_INT_RST_LATCH		0x21
#define BMC150_ACCEL_INT_MODE_LATCH_RESET	0x80
#define BMC150_ACCEL_INT_MODE_LATCH_INT		0x0F
#define BMC150_ACCEL_INT_MODE_NON_LATCH_INT	0x00

#define BMC150_ACCEL_REG_INT_EN_0		0x16
#define BMC150_ACCEL_INT_EN_BIT_SLP_X		BIT(0)
#define BMC150_ACCEL_INT_EN_BIT_SLP_Y		BIT(1)
#define BMC150_ACCEL_INT_EN_BIT_SLP_Z		BIT(2)

#define BMC150_ACCEL_REG_INT_EN_1		0x17
#define BMC150_ACCEL_INT_EN_BIT_DATA_EN		BIT(4)
#define BMC150_ACCEL_INT_EN_BIT_FFULL_EN	BIT(5)
#define BMC150_ACCEL_INT_EN_BIT_FWM_EN		BIT(6)

#define BMC150_ACCEL_REG_INT_OUT_CTRL		0x20
#define BMC150_ACCEL_INT_OUT_CTRL_INT1_LVL	BIT(0)
#define BMC150_ACCEL_INT_OUT_CTRL_INT2_LVL	BIT(2)

#define BMC150_ACCEL_REG_INT_5			0x27
#define BMC150_ACCEL_SLOPE_DUR_MASK		0x03

#define BMC150_ACCEL_REG_INT_6			0x28
#define BMC150_ACCEL_SLOPE_THRES_MASK		0xFF

/* Slope duration in terms of number of samples */
#define BMC150_ACCEL_DEF_SLOPE_DURATION		1
/* in terms of multiples of g's/LSB, based on range */
#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD	1

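/*
 * Acceleration data registers: one little-endian 16-bit (LSB, MSB) pair per
 * axis, see BMC150_ACCEL_AXIS_TO_REG().
 */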
#define BMC150_ACCEL_REG_XOUT_L		0x02

#define BMC150_ACCEL_MAX_STARTUP_TIME_MS	100

/* Sleep Duration values */
#define BMC150_ACCEL_SLEEP_500_MICRO	0x05
#define BMC150_ACCEL_SLEEP_1_MS		0x06
#define BMC150_ACCEL_SLEEP_2_MS		0x07
#define BMC150_ACCEL_SLEEP_4_MS		0x08
#define BMC150_ACCEL_SLEEP_6_MS		0x09
#define BMC150_ACCEL_SLEEP_10_MS	0x0A
#define BMC150_ACCEL_SLEEP_25_MS	0x0B
#define BMC150_ACCEL_SLEEP_50_MS	0x0C
#define BMC150_ACCEL_SLEEP_100_MS	0x0D
#define BMC150_ACCEL_SLEEP_500_MS	0x0E
#define BMC150_ACCEL_SLEEP_1_SEC	0x0F

#define BMC150_ACCEL_REG_TEMP		0x08
#define BMC150_ACCEL_TEMP_CENTER_VAL	23

#define BMC150_ACCEL_AXIS_TO_REG(axis)	(BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS	2000

#define BMC150_ACCEL_REG_FIFO_STATUS	0x0E
#define BMC150_ACCEL_REG_FIFO_CONFIG0	0x30
#define BMC150_ACCEL_REG_FIFO_CONFIG1	0x3E
#define BMC150_ACCEL_REG_FIFO_DATA	0x3F
#define BMC150_ACCEL_FIFO_LENGTH	32

enum bmc150_accel_axis {
	AXIS_X,
	AXIS_Y,
	AXIS_Z,
	AXIS_MAX,
};

enum bmc150_power_modes {
	BMC150_ACCEL_SLEEP_MODE_NORMAL,
	BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND,
	BMC150_ACCEL_SLEEP_MODE_LPM,
	BMC150_ACCEL_SLEEP_MODE_SUSPEND = 0x04,
};

struct bmc150_scale_info {
	int scale;
	u8 reg_range;
};

struct bmc150_accel_chip_info {
	const char *name;
	u8 chip_id;
	const struct iio_chan_spec *channels;
	int num_channels;
	const struct bmc150_scale_info scale_table[4];
};

static const struct {
	int val;
	int val2;
	u8 bw_bits;
} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
				     {31, 260000, 0x09},
				     {62, 500000, 0x0A},
				     {125, 0, 0x0B},
				     {250, 0, 0x0C},
				     {500, 0, 0x0D},
				     {1000, 0, 0x0E},
				     {2000, 0, 0x0F} };

static const struct {
	int bw_bits;
	int msec;
} bmc150_accel_sample_upd_time[] = { {0x08, 64},
				     {0x09, 32},
				     {0x0A, 16},
				     {0x0B, 8},
				     {0x0C, 4},
				     {0x0D, 2},
				     {0x0E, 1},
				     {0x0F, 1} };

static const struct {
	int sleep_dur;
	u8 reg_value;
} bmc150_accel_sleep_value_table[] = { {0, 0},
				       {500, BMC150_ACCEL_SLEEP_500_MICRO},
				       {1000, BMC150_ACCEL_SLEEP_1_MS},
				       {2000, BMC150_ACCEL_SLEEP_2_MS},
				       {4000, BMC150_ACCEL_SLEEP_4_MS},
				       {6000, BMC150_ACCEL_SLEEP_6_MS},
				       {10000, BMC150_ACCEL_SLEEP_10_MS},
				       {25000, BMC150_ACCEL_SLEEP_25_MS},
				       {50000, BMC150_ACCEL_SLEEP_50_MS},
				       {100000, BMC150_ACCEL_SLEEP_100_MS},
				       {500000, BMC150_ACCEL_SLEEP_500_MS},
				       {1000000, BMC150_ACCEL_SLEEP_1_SEC} };

const struct regmap_config bmc150_regmap_conf = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x3f,
};
EXPORT_SYMBOL_GPL(bmc150_regmap_conf);

static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
				 enum bmc150_power_modes mode,
				 int dur_us)
{
	struct device *dev = regmap_get_device(data->regmap);
	int i;
	int ret;
	u8 lpw_bits;
	int dur_val = -1;

	if (dur_us > 0) {
		for (i = 0; i < ARRAY_SIZE(bmc150_accel_sleep_value_table);
		     ++i) {
			if (bmc150_accel_sleep_value_table[i].sleep_dur ==
			    dur_us)
				dur_val =
				bmc150_accel_sleep_value_table[i].reg_value;
		}
	} else {
		dur_val = 0;
	}

	if (dur_val < 0)
		return -EINVAL;

	lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
	lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);

	dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);
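	/*
	 * PMU_LPW takes the power mode in bits [7:5] and the sleep-duration
	 * code in bits [4:1].
	 */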

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_pmu_lpw\n");
		return ret;
	}

	return 0;
}

static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
			       int val2)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
		if (bmc150_accel_samp_freq_table[i].val == val &&
		    bmc150_accel_samp_freq_table[i].val2 == val2) {
			ret = regmap_write(data->regmap,
				BMC150_ACCEL_REG_PMU_BW,
				bmc150_accel_samp_freq_table[i].bw_bits);
			if (ret < 0)
				return ret;

			data->bw_bits =
				bmc150_accel_samp_freq_table[i].bw_bits;
			return 0;
		}
	}

	return -EINVAL;
}

static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
			   data->slope_thres);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_int_6\n");
		return ret;
	}

	ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
				 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_5\n");
		return ret;
	}

	dev_dbg(dev, "%x %x\n", data->slope_thres, data->slope_dur);

	return ret;
}

static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
					 bool state)
{
	if (state)
		return bmc150_accel_update_slope(t->data);

	return 0;
}

static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
			       int *val2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
		if (bmc150_accel_samp_freq_table[i].bw_bits == data->bw_bits) {
			*val = bmc150_accel_samp_freq_table[i].val;
			*val2 = bmc150_accel_samp_freq_table[i].val2;
			return IIO_VAL_INT_PLUS_MICRO;
		}
	}

	return -EINVAL;
}

#ifdef CONFIG_PM
static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_sample_upd_time); ++i) {
		if (bmc150_accel_sample_upd_time[i].bw_bits == data->bw_bits)
			return bmc150_accel_sample_upd_time[i].msec;
	}

	return BMC150_ACCEL_MAX_STARTUP_TIME_MS;
}

static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	if (on) {
		ret = pm_runtime_resume_and_get(dev);
	} else {
		pm_runtime_mark_last_busy(dev);
		ret = pm_runtime_put_autosuspend(dev);
	}

	if (ret < 0) {
		dev_err(dev,
			"Failed: %s for %d\n", __func__, on);
		return ret;
	}

	return 0;
}
#else
static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
	return 0;
}
#endif

#ifdef CONFIG_ACPI
/*
 * Support for getting accelerometer information from BOSC0200 ACPI nodes.
 *
 * There are 2 variants of the BOSC0200 ACPI node. Some 2-in-1s with 360 degree
 * hinges declare 2 I2C ACPI-resources for 2 accelerometers, 1 in the display
 * and 1 in the base of the 2-in-1. On these 2-in-1s the ROMS ACPI object
 * contains the mount-matrix for the sensor in the display and ROMK contains
 * the mount-matrix for the sensor in the base. On devices using a single
 * sensor there is a ROTM ACPI object which contains the mount-matrix.
 *
 * Here is an incomplete list of devices known to use 1 of these setups:
 *
 * Yoga devices with 2 accelerometers using ROMS + ROMK for the mount-matrices:
 * Lenovo Thinkpad Yoga 11e 3rd gen
 * Lenovo Thinkpad Yoga 11e 4th gen
 *
 * Tablets using a single accelerometer using ROTM for the mount-matrix:
 * Chuwi Hi8 Pro (CWI513)
 * Chuwi Vi8 Plus (CWI519)
 * Chuwi Hi13
 * Irbis TW90
 * Jumper EZpad mini 3
 * Onda V80 plus
 * Predia Basic Tablet
 */
static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
						   struct iio_mount_matrix *orientation)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct acpi_device *adev = ACPI_COMPANION(dev);
	char *name, *alt_name, *label, *str;
	union acpi_object *obj, *elements;
	acpi_status status;
	int i, j, val[3];

	if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) {
		alt_name = "ROMK";
		label = "accel-base";
	} else {
		alt_name = "ROMS";
		label = "accel-display";
	}

	if (acpi_has_method(adev->handle, "ROTM")) {
		name = "ROTM";
	} else if (acpi_has_method(adev->handle, alt_name)) {
		name = alt_name;
		indio_dev->label = label;
	} else {
		return false;
	}

	status = acpi_evaluate_object(adev->handle, name, NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		dev_warn(dev, "Failed to get ACPI mount matrix: %d\n", status);
		return false;
	}

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3)
		goto unknown_format;

	elements = obj->package.elements;
	for (i = 0; i < 3; i++) {
		if (elements[i].type != ACPI_TYPE_STRING)
			goto unknown_format;

		str = elements[i].string.pointer;
		if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3)
			goto unknown_format;

		for (j = 0; j < 3; j++) {
			switch (val[j]) {
			case -1: str = "-1"; break;
			case 0:  str = "0";  break;
			case 1:  str = "1";  break;
			default: goto unknown_format;
			}
			orientation->rotation[i * 3 + j] = str;
		}
	}

	kfree(buffer.pointer);
	return true;

unknown_format:
	dev_warn(dev, "Unknown ACPI mount matrix format, ignoring\n");
	kfree(buffer.pointer);
	return false;
}

static bool bmc150_apply_dual250e_acpi_orientation(struct device *dev,
						   struct iio_mount_matrix *orientation)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	if (strcmp(dev_name(dev), "i2c-DUAL250E:base") == 0)
		indio_dev->label = "accel-base";
	else
		indio_dev->label = "accel-display";

	return false; /* DUAL250E fwnodes have no mount matrix info */
}

static bool bmc150_apply_acpi_orientation(struct device *dev,
					  struct iio_mount_matrix *orientation)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (adev && acpi_dev_hid_uid_match(adev, "BOSC0200", NULL))
		return bmc150_apply_bosc0200_acpi_orientation(dev, orientation);

	if (adev && acpi_dev_hid_uid_match(adev, "DUAL250E", NULL))
		return bmc150_apply_dual250e_acpi_orientation(dev, orientation);

	return false;
}
#else
static bool bmc150_apply_acpi_orientation(struct device *dev,
					  struct iio_mount_matrix *orientation)
{
	return false;
}
#endif

struct bmc150_accel_interrupt_info {
	u8 map_reg;
	u8 map_bitmask;
	u8 en_reg;
	u8 en_bitmask;
};

static const struct bmc150_accel_interrupt_info
bmc150_accel_interrupts_int1[BMC150_ACCEL_INTERRUPTS] = {
	{ /* data ready interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT1_DATA,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
	},
	{ /* motion interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_0,
		.map_bitmask = BMC150_ACCEL_INT_MAP_0_BIT_INT1_SLOPE,
		.en_reg = BMC150_ACCEL_REG_INT_EN_0,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
			BMC150_ACCEL_INT_EN_BIT_SLP_Y |
			BMC150_ACCEL_INT_EN_BIT_SLP_Z
	},
	{ /* fifo watermark interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT1_FWM,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
	},
};

static const struct bmc150_accel_interrupt_info
bmc150_accel_interrupts_int2[BMC150_ACCEL_INTERRUPTS] = {
	{ /* data ready interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT2_DATA,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
	},
	{ /* motion interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_2,
		.map_bitmask = BMC150_ACCEL_INT_MAP_2_BIT_INT2_SLOPE,
		.en_reg = BMC150_ACCEL_REG_INT_EN_0,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
			BMC150_ACCEL_INT_EN_BIT_SLP_Y |
			BMC150_ACCEL_INT_EN_BIT_SLP_Z
	},
	{ /* fifo watermark interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT2_FWM,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
	},
};

static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
					  struct bmc150_accel_data *data, int irq)
{
	const struct bmc150_accel_interrupt_info *irq_info = NULL;
	struct device *dev = regmap_get_device(data->regmap);
	int i;

	/*
	 * For now we map all interrupts to the same output pin.
	 * However, some boards may have just INT2 (and not INT1) connected,
	 * so we try to detect which IRQ it is based on the interrupt-names.
	 * Without interrupt-names, we assume the irq belongs to INT1.
	 */
	irq_info = bmc150_accel_interrupts_int1;
	if (data->type == BOSCH_BMC156 ||
	    irq == of_irq_get_byname(dev->of_node, "INT2"))
		irq_info = bmc150_accel_interrupts_int2;

	for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
		data->interrupts[i].info = &irq_info[i];
}

static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
				      bool state)
{
	struct device *dev = regmap_get_device(data->regmap);
	struct bmc150_accel_interrupt *intr = &data->interrupts[i];
	const struct bmc150_accel_interrupt_info *info = intr->info;
	int ret;

	if (state) {
		if (atomic_inc_return(&intr->users) > 1)
			return 0;
	} else {
		if (atomic_dec_return(&intr->users) > 0)
			return 0;
	}

	/*
	 * We expect enable and disable to happen in reverse order. That is
	 * the case here anyway, as our resume operation uses synchronous
	 * runtime PM calls while the suspend operation is delayed by the
	 * autosuspend delay, so the disable still happens in reverse order
	 * of the corresponding enable.
	 * When runtime PM is disabled the device is always on, so the order
	 * does not matter.
	 */
	ret = bmc150_accel_set_power_state(data, state);
	if (ret < 0)
		return ret;

	/* map the interrupt to the appropriate pins */
	ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
				 (state ? info->map_bitmask : 0));
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_map\n");
		goto out_fix_power_state;
	}

	/* enable/disable the interrupt */
	ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
				 (state ? info->en_bitmask : 0));
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_en\n");
		goto out_fix_power_state;
	}

	return 0;

out_fix_power_state:
	bmc150_accel_set_power_state(data, false);
	return ret;
}

static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
		if (data->chip_info->scale_table[i].scale == val) {
			ret = regmap_write(data->regmap,
				     BMC150_ACCEL_REG_PMU_RANGE,
				     data->chip_info->scale_table[i].reg_range);
			if (ret < 0) {
				dev_err(dev, "Error writing pmu_range\n");
				return ret;
			}

			data->range = data->chip_info->scale_table[i].reg_range;
			return 0;
		}
	}

	return -EINVAL;
}

static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;
	unsigned int value;

	mutex_lock(&data->mutex);

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_temp\n");
		mutex_unlock(&data->mutex);
		return ret;
	}
	*val = sign_extend32(value, 7);

	mutex_unlock(&data->mutex);

	return IIO_VAL_INT;
}

static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
				 struct iio_chan_spec const *chan,
				 int *val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;
	int axis = chan->scan_index;
	__le16 raw_val;

	mutex_lock(&data->mutex);
	ret = bmc150_accel_set_power_state(data, true);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
			       &raw_val, sizeof(raw_val));
	if (ret < 0) {
		dev_err(dev, "Error reading axis %d\n", axis);
		bmc150_accel_set_power_state(data, false);
		mutex_unlock(&data->mutex);
		return ret;
	}
	*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
			     chan->scan_type.realbits - 1);
	ret = bmc150_accel_set_power_state(data, false);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		return ret;

	return IIO_VAL_INT;
}

static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 int *val, int *val2, long mask)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		switch (chan->type) {
		case IIO_TEMP:
			return bmc150_accel_get_temp(data, val);
		case IIO_ACCEL:
			if (iio_buffer_enabled(indio_dev))
				return -EBUSY;
			else
				return bmc150_accel_get_axis(data, chan, val);
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		if (chan->type == IIO_TEMP) {
			*val = BMC150_ACCEL_TEMP_CENTER_VAL;
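			/* raw value 0 is roughly 23 degrees Celsius */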
			return IIO_VAL_INT;
		} else {
			return -EINVAL;
		}
	case IIO_CHAN_INFO_SCALE:
		*val = 0;
		switch (chan->type) {
		case IIO_TEMP:
			*val2 = 500000;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_ACCEL:
		{
			int i;
			const struct bmc150_scale_info *si;
			int st_size = ARRAY_SIZE(data->chip_info->scale_table);

			for (i = 0; i < st_size; ++i) {
				si = &data->chip_info->scale_table[i];
				if (si->reg_range == data->range) {
					*val2 = si->scale;
					return IIO_VAL_INT_PLUS_MICRO;
				}
			}
			return -EINVAL;
		}
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = bmc150_accel_get_bw(data, val, val2);
		mutex_unlock(&data->mutex);
		return ret;
	default:
		return -EINVAL;
	}
}

static int bmc150_accel_write_raw(struct iio_dev *indio_dev,
				  struct iio_chan_spec const *chan,
				  int val, int val2, long mask)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = bmc150_accel_set_bw(data, val, val2);
		mutex_unlock(&data->mutex);
		break;
	case IIO_CHAN_INFO_SCALE:
		if (val)
			return -EINVAL;

		mutex_lock(&data->mutex);
		ret = bmc150_accel_set_scale(data, val2);
		mutex_unlock(&data->mutex);
		return ret;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int bmc150_accel_read_event(struct iio_dev *indio_dev,
				   const struct iio_chan_spec *chan,
				   enum iio_event_type type,
				   enum iio_event_direction dir,
				   enum iio_event_info info,
				   int *val, int *val2)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	*val2 = 0;
	switch (info) {
	case IIO_EV_INFO_VALUE:
		*val = data->slope_thres;
		break;
	case IIO_EV_INFO_PERIOD:
		*val = data->slope_dur;
		break;
	default:
		return -EINVAL;
	}

	return IIO_VAL_INT;
}

static int bmc150_accel_write_event(struct iio_dev *indio_dev,
				    const struct iio_chan_spec *chan,
				    enum iio_event_type type,
				    enum iio_event_direction dir,
				    enum iio_event_info info,
				    int val, int val2)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (data->ev_enable_state)
		return -EBUSY;

	switch (info) {
	case IIO_EV_INFO_VALUE:
		data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
		break;
	case IIO_EV_INFO_PERIOD:
		data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
					  const struct iio_chan_spec *chan,
					  enum iio_event_type type,
					  enum iio_event_direction dir)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return data->ev_enable_state;
}

static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
					   const struct iio_chan_spec *chan,
					   enum iio_event_type type,
					   enum iio_event_direction dir,
					   int state)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	if (state == data->ev_enable_state)
		return 0;

	mutex_lock(&data->mutex);

	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_ANY_MOTION,
					 state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	data->ev_enable_state = state;
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
					 struct iio_trigger *trig)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int i;

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		if (data->triggers[i].indio_trig == trig)
			return 0;
	}

	return -EINVAL;
}

static ssize_t bmc150_accel_get_fifo_watermark(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int wm;

	mutex_lock(&data->mutex);
	wm = data->watermark;
	mutex_unlock(&data->mutex);

	return sprintf(buf, "%d\n", wm);
}

static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	bool state;

	mutex_lock(&data->mutex);
	state = data->fifo_mode;
	mutex_unlock(&data->mutex);

	return sprintf(buf, "%d\n", state);
}

static const struct iio_mount_matrix *
bmc150_accel_get_mount_matrix(const struct iio_dev *indio_dev,
			      const struct iio_chan_spec *chan)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return &data->orientation;
}

static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
	IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmc150_accel_get_mount_matrix),
	{ }
};

static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
static IIO_CONST_ATTR(hwfifo_watermark_max,
		      __stringify(BMC150_ACCEL_FIFO_LENGTH));
static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
		       bmc150_accel_get_fifo_state, NULL, 0);
static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
		       bmc150_accel_get_fifo_watermark, NULL, 0);

static const struct attribute *bmc150_accel_fifo_attributes[] = {
	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
	NULL,
};

static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (val > BMC150_ACCEL_FIFO_LENGTH)
		val = BMC150_ACCEL_FIFO_LENGTH;

	mutex_lock(&data->mutex);
	data->watermark = val;
	mutex_unlock(&data->mutex);

	return 0;
}

/*
 * We must read at least one full frame in one burst, otherwise the rest of the
 * frame data is discarded.
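 * Each frame holds the three 16-bit axis samples (3 * 2 = 6 bytes), so a
 * burst of N frames transfers N * 6 bytes from the FIFO data register.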
 */
static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
				      char *buffer, int samples)
{
	struct device *dev = regmap_get_device(data->regmap);
	int sample_length = 3 * 2;
	int ret;
	int total_length = samples * sample_length;

	ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
			      buffer, total_length);
	if (ret)
		dev_err(dev,
			"Error transferring data from fifo: %d\n", ret);

	return ret;
}

static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
				     unsigned samples, bool irq)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;
	u8 count;
	u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
	int64_t tstamp;
	uint64_t sample_period;
	unsigned int val;

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_fifo_status\n");
		return ret;
	}

	count = val & 0x7F;

	if (!count)
		return 0;

	/*
	 * If we are called from the IRQ handler we know the stored timestamp
	 * is fairly accurate for the last stored sample. Otherwise, if we are
	 * called as a result of a read operation from userspace and hence
	 * before the watermark interrupt was triggered, take a timestamp
	 * now. We can fall anywhere in between two samples so the error in
	 * this case is at most one sample period.
	 */
	if (!irq) {
		data->old_timestamp = data->timestamp;
		data->timestamp = iio_get_time_ns(indio_dev);
	}

	/*
	 * Approximate timestamps for each of the samples based on the
	 * sampling frequency, the timestamp of the last sample and the number
	 * of samples.
	 *
	 * Note that we can't use the current bandwidth settings to compute
	 * the sample period because the sample period varies from device to
	 * device (e.g. between 31.70ms and 32.20ms for a bandwidth of
	 * 15.63Hz). That small variation adds up when we store a large number
	 * of samples and creates significant jitter between the last and
	 * first samples in different batches (e.g. 32ms vs 21ms).
	 *
	 * To avoid this issue we compute the actual sample period ourselves
	 * based on the timestamp delta between the last two flush operations.
	 */
	sample_period = (data->timestamp - data->old_timestamp);
	do_div(sample_period, count);
	tstamp = data->timestamp - (count - 1) * sample_period;

	if (samples && count > samples)
		count = samples;

	ret = bmc150_accel_fifo_transfer(data, (u8 *)buffer, count);
	if (ret)
		return ret;

	/*
	 * Ideally we want the IIO core to handle the demux when running in
	 * fifo mode but not when running in triggered buffer mode.
	 * Unfortunately this does not seem to be possible, so stick with
	 * driver demux for now.
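	 *
	 * Each FIFO frame always carries X, Y and Z; the loop below copies
	 * only the channels set in active_scan_mask into the scan buffer.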
	 */
	for (i = 0; i < count; i++) {
		int j, bit;

		j = 0;
		for_each_set_bit(bit, indio_dev->active_scan_mask,
				 indio_dev->masklength)
			memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
			       sizeof(data->scan.channels[0]));

		iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
						   tstamp);

		tstamp += sample_period;
	}

	return count;
}

static int bmc150_accel_fifo_flush(struct iio_dev *indio_dev, unsigned samples)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = __bmc150_accel_fifo_flush(indio_dev, samples, false);
	mutex_unlock(&data->mutex);

	return ret;
}

static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
		"15.620000 31.260000 62.50000 125 250 500 1000 2000");

static struct attribute *bmc150_accel_attributes[] = {
	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group bmc150_accel_attrs_group = {
	.attrs = bmc150_accel_attributes,
};

static const struct iio_event_spec bmc150_accel_event = {
	.type = IIO_EV_TYPE_ROC,
	.dir = IIO_EV_DIR_EITHER,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			 BIT(IIO_EV_INFO_ENABLE) |
			 BIT(IIO_EV_INFO_PERIOD)
};

#define BMC150_ACCEL_CHANNEL(_axis, bits) {				\
	.type = IIO_ACCEL,						\
	.modified = 1,							\
	.channel2 = IIO_MOD_##_axis,					\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
	.scan_index = AXIS_##_axis,					\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = (bits),					\
		.storagebits = 16,					\
		.shift = 16 - (bits),					\
		.endianness = IIO_LE,					\
	},								\
	.ext_info = bmc150_accel_ext_info,				\
	.event_spec = &bmc150_accel_event,				\
	.num_event_specs = 1						\
}

#define BMC150_ACCEL_CHANNELS(bits) {					\
	{								\
		.type = IIO_TEMP,					\
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
				      BIT(IIO_CHAN_INFO_SCALE) |	\
				      BIT(IIO_CHAN_INFO_OFFSET),	\
		.scan_index = -1,					\
	},								\
	BMC150_ACCEL_CHANNEL(X, bits),					\
	BMC150_ACCEL_CHANNEL(Y, bits),					\
	BMC150_ACCEL_CHANNEL(Z, bits),					\
	IIO_CHAN_SOFT_TIMESTAMP(3),					\
}

static const struct iio_chan_spec bma222e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(8);
static const struct iio_chan_spec bma250e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(10);
static const struct iio_chan_spec bmc150_accel_channels[] =
	BMC150_ACCEL_CHANNELS(12);
static const struct iio_chan_spec bma280_accel_channels[] =
	BMC150_ACCEL_CHANNELS(14);

/*
 * The range for the Bosch sensors is typically +-2g/4g/8g/16g, distributed
 * over the amount of bits (see above). The scale table can be calculated using
 *     (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
 * e.g. for +-2g and 12 bits: (4 / 2^12) * 9.80665 m/s^2 = 0.0095768... m/s^2
 * Multiply by 10^6 and round to get the values listed below.
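 * Likewise for the 8-bit BMA222E at +-2g: (4 / 2^8) * 9.80665 m/s^2 =
 * 0.1532289... m/s^2, which rounds to the 153229 entry below.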
 */
static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
	{
		.name = "BMA222",
		.chip_id = 0x03,
		.channels = bma222e_accel_channels,
		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
		.scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
				 {306458, BMC150_ACCEL_DEF_RANGE_4G},
				 {612916, BMC150_ACCEL_DEF_RANGE_8G},
				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA222E",
		.chip_id = 0xF8,
		.channels = bma222e_accel_channels,
		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
		.scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
				 {306458, BMC150_ACCEL_DEF_RANGE_4G},
				 {612916, BMC150_ACCEL_DEF_RANGE_8G},
				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA250E",
		.chip_id = 0xF9,
		.channels = bma250e_accel_channels,
		.num_channels = ARRAY_SIZE(bma250e_accel_channels),
		.scale_table = { {38307, BMC150_ACCEL_DEF_RANGE_2G},
				 {76614, BMC150_ACCEL_DEF_RANGE_4G},
				 {153229, BMC150_ACCEL_DEF_RANGE_8G},
				 {306458, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA253/BMA254/BMA255/BMC150/BMC156/BMI055",
		.chip_id = 0xFA,
		.channels = bmc150_accel_channels,
		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
		.scale_table = { {9577, BMC150_ACCEL_DEF_RANGE_2G},
				 {19154, BMC150_ACCEL_DEF_RANGE_4G},
				 {38307, BMC150_ACCEL_DEF_RANGE_8G},
				 {76614, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA280",
		.chip_id = 0xFB,
		.channels = bma280_accel_channels,
		.num_channels = ARRAY_SIZE(bma280_accel_channels),
		.scale_table = { {2394, BMC150_ACCEL_DEF_RANGE_2G},
				 {4788, BMC150_ACCEL_DEF_RANGE_4G},
				 {9577, BMC150_ACCEL_DEF_RANGE_8G},
				 {19154, BMC150_ACCEL_DEF_RANGE_16G} },
	},
};

static const struct iio_info bmc150_accel_info = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
};

static const struct iio_info bmc150_accel_info_fifo = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
	.validate_trigger = bmc150_accel_validate_trigger,
	.hwfifo_set_watermark = bmc150_accel_set_watermark,
	.hwfifo_flush_to_buffer = bmc150_accel_fifo_flush,
};

static const unsigned long bmc150_accel_scan_masks[] = {
				BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
				0};

static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
			       data->buffer, AXIS_MAX * 2);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		goto err_read;

	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
					   pf->timestamp);
err_read:
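	/* the trigger must be notified even if the register read failed */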
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

static void bmc150_accel_trig_reen(struct iio_trigger *trig)
{
	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
	struct bmc150_accel_data *data = t->data;
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	/* new data interrupts don't need ack */
	if (t == &t->data->triggers[BMC150_ACCEL_TRIGGER_DATA_READY])
		return;

	mutex_lock(&data->mutex);
	/* clear any latched interrupt */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
			   BMC150_ACCEL_INT_MODE_LATCH_INT |
			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		dev_err(dev, "Error writing reg_int_rst_latch\n");
}

static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
					  bool state)
{
	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
	struct bmc150_accel_data *data = t->data;
	int ret;

	mutex_lock(&data->mutex);

	if (t->enabled == state) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	if (t->setup) {
		ret = t->setup(t, state);
		if (ret < 0) {
			mutex_unlock(&data->mutex);
			return ret;
		}
	}

	ret = bmc150_accel_set_interrupt(data, t->intr, state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	t->enabled = state;

	mutex_unlock(&data->mutex);

	return ret;
}

static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
	.set_trigger_state = bmc150_accel_trigger_set_state,
	.reenable = bmc150_accel_trig_reen,
};

static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	int dir;
	int ret;
	unsigned int val;

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_int_status_2\n");
		return ret;
	}

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_SIGN)
		dir = IIO_EV_DIR_FALLING;
	else
		dir = IIO_EV_DIR_RISING;

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_X)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_X,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Y)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_Y,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Z)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_Z,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	return ret;
}

static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	bool ack = false;
	int ret;

	mutex_lock(&data->mutex);

	if (data->fifo_mode) {
		ret = __bmc150_accel_fifo_flush(indio_dev,
						BMC150_ACCEL_FIFO_LENGTH, true);
		if (ret > 0)
			ack = true;
	}

	if (data->ev_enable_state) {
		ret = bmc150_accel_handle_roc_event(indio_dev);
		if (ret > 0)
			ack = true;
	}

	if (ack) {
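		/* reset the latched interrupt so the line can fire again */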
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_INT |
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret)
			dev_err(dev, "Error writing reg_int_rst_latch\n");

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	mutex_unlock(&data->mutex);

	return ret;
}

static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	bool ack = false;
	int i;

	data->old_timestamp = data->timestamp;
	data->timestamp = iio_get_time_ns(indio_dev);

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		if (data->triggers[i].enabled) {
			iio_trigger_poll(data->triggers[i].indio_trig);
			ack = true;
			break;
		}
	}

	if (data->ev_enable_state || data->fifo_mode)
		return IRQ_WAKE_THREAD;

	if (ack)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

static const struct {
	int intr;
	const char *name;
	int (*setup)(struct bmc150_accel_trigger *t, bool state);
} bmc150_accel_triggers[BMC150_ACCEL_TRIGGERS] = {
	{
		.intr = 0,
		.name = "%s-dev%d",
	},
	{
		.intr = 1,
		.name = "%s-any-motion-dev%d",
		.setup = bmc150_accel_any_motion_setup,
	},
};

static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
					     int from)
{
	int i;

	for (i = from; i >= 0; i--) {
		if (data->triggers[i].indio_trig) {
			iio_trigger_unregister(data->triggers[i].indio_trig);
			data->triggers[i].indio_trig = NULL;
		}
	}
}

static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
				       struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int i, ret;

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		struct bmc150_accel_trigger *t = &data->triggers[i];

		t->indio_trig = devm_iio_trigger_alloc(dev,
					bmc150_accel_triggers[i].name,
					indio_dev->name,
					iio_device_id(indio_dev));
		if (!t->indio_trig) {
			ret = -ENOMEM;
			break;
		}

		t->indio_trig->ops = &bmc150_accel_trigger_ops;
		t->intr = bmc150_accel_triggers[i].intr;
		t->data = data;
		t->setup = bmc150_accel_triggers[i].setup;
		iio_trigger_set_drvdata(t->indio_trig, t);

		ret = iio_trigger_register(t->indio_trig);
		if (ret)
			break;
	}

	if (ret)
		bmc150_accel_unregister_triggers(data, i - 1);

	return ret;
}

#define BMC150_ACCEL_FIFO_MODE_STREAM	0x80
#define BMC150_ACCEL_FIFO_MODE_FIFO	0x40
#define BMC150_ACCEL_FIFO_MODE_BYPASS	0x00

static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
	int ret;

	ret = regmap_write(data->regmap, reg, data->fifo_mode);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_fifo_config1\n");
		return ret;
	}

	if (!data->fifo_mode)
		return 0;

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
			   data->watermark);
	if (ret < 0)
		dev_err(dev, "Error writing reg_fifo_config0\n");

	return ret;
}

static int bmc150_accel_buffer_preenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
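	/* runtime PM keeps the chip powered while the buffer is enabled */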

	return bmc150_accel_set_power_state(data, true);
}

static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret = 0;

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
		return 0;

	mutex_lock(&data->mutex);

	if (!data->watermark)
		goto out;

	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					 true);
	if (ret)
		goto out;

	data->fifo_mode = BMC150_ACCEL_FIFO_MODE_FIFO;

	ret = bmc150_accel_fifo_set_mode(data);
	if (ret) {
		data->fifo_mode = 0;
		bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					   false);
	}

out:
	mutex_unlock(&data->mutex);

	return ret;
}

static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
		return 0;

	mutex_lock(&data->mutex);

	if (!data->fifo_mode)
		goto out;

	bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK, false);
	__bmc150_accel_fifo_flush(indio_dev, BMC150_ACCEL_FIFO_LENGTH, false);
	data->fifo_mode = 0;
	bmc150_accel_fifo_set_mode(data);

out:
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_buffer_postdisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return bmc150_accel_set_power_state(data, false);
}

static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
	.preenable = bmc150_accel_buffer_preenable,
	.postenable = bmc150_accel_buffer_postenable,
	.predisable = bmc150_accel_buffer_predisable,
	.postdisable = bmc150_accel_buffer_postdisable,
};

static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;
	unsigned int val;

	/*
	 * Reset chip to get it in a known good state. A delay of 1.8ms after
	 * reset is required according to the data sheets of supported chips.
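	 * Writing BMC150_ACCEL_RESET_VAL (0xB6) performs the softreset which
	 * restores the power-on default register values.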
	 */
	regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
		     BMC150_ACCEL_RESET_VAL);
	usleep_range(1800, 2500);

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
	if (ret < 0) {
		dev_err(dev, "Error: Reading chip id\n");
		return ret;
	}

	dev_dbg(dev, "Chip Id %x\n", val);
	for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
		if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
			data->chip_info = &bmc150_accel_chip_info_tbl[i];
			break;
		}
	}

	if (!data->chip_info) {
		dev_err(dev, "Invalid chip %x\n", val);
		return -ENODEV;
	}

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	if (ret < 0)
		return ret;

	/* Set Bandwidth */
	ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
	if (ret < 0)
		return ret;

	/* Set Default Range */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
			   BMC150_ACCEL_DEF_RANGE_4G);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_pmu_range\n");
		return ret;
	}

	data->range = BMC150_ACCEL_DEF_RANGE_4G;

	/* Set default slope duration and thresholds */
	data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
	data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
	ret = bmc150_accel_update_slope(data);
	if (ret < 0)
		return ret;

	/* Set default as latched interrupts */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
			   BMC150_ACCEL_INT_MODE_LATCH_INT |
			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_int_rst_latch\n");
		return ret;
	}

	return 0;
}

int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
			    enum bmc150_type type, const char *name,
			    bool block_supported)
{
	const struct attribute **fifo_attrs;
	struct bmc150_accel_data *data;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	dev_set_drvdata(dev, indio_dev);

	data->regmap = regmap;
	data->type = type;

	if (!bmc150_apply_acpi_orientation(dev, &data->orientation)) {
		ret = iio_read_mount_matrix(dev, &data->orientation);
		if (ret)
			return ret;
	}

	/*
	 * VDD is the analog and digital domain voltage supply
	 * VDDIO is the digital I/O voltage supply
	 */
	data->regulators[0].supply = "vdd";
	data->regulators[1].supply = "vddio";
	ret = devm_regulator_bulk_get(dev,
				      ARRAY_SIZE(data->regulators),
				      data->regulators);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get regulators\n");

	ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
				    data->regulators);
	if (ret) {
		dev_err(dev, "failed to enable regulators: %d\n", ret);
		return ret;
	}
	/*
	 * 2ms or 3ms power-on time according to the datasheets, so be on the
	 * safe side and set this delay to 5ms.
	 */
	msleep(5);

	ret = bmc150_accel_chip_init(data);
	if (ret < 0)
		goto err_disable_regulators;

	mutex_init(&data->mutex);

	indio_dev->channels = data->chip_info->channels;
	indio_dev->num_channels = data->chip_info->num_channels;
	indio_dev->name = name ? name : data->chip_info->name;
	indio_dev->available_scan_masks = bmc150_accel_scan_masks;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &bmc150_accel_info;

	if (block_supported) {
		indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
		indio_dev->info = &bmc150_accel_info_fifo;
		fifo_attrs = bmc150_accel_fifo_attributes;
	} else {
		fifo_attrs = NULL;
	}

	ret = iio_triggered_buffer_setup_ext(indio_dev,
					     &iio_pollfunc_store_time,
					     bmc150_accel_trigger_handler,
					     IIO_BUFFER_DIRECTION_IN,
					     &bmc150_accel_buffer_ops,
					     fifo_attrs);
	if (ret < 0) {
		dev_err(dev, "Failed: iio triggered buffer setup\n");
		goto err_disable_regulators;
	}

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						bmc150_accel_irq_handler,
						bmc150_accel_irq_thread_handler,
						IRQF_TRIGGER_RISING,
						BMC150_ACCEL_IRQ_NAME,
						indio_dev);
		if (ret)
			goto err_buffer_cleanup;

		/*
		 * Set latched mode interrupt. While certain interrupts are
		 * non-latched regardless of this setting (e.g. new data) we
		 * want to use latch mode when we can to prevent interrupt
		 * flooding.
		 */
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret < 0) {
			dev_err(dev, "Error writing reg_int_rst_latch\n");
			goto err_buffer_cleanup;
		}

		bmc150_accel_interrupts_setup(indio_dev, data, irq);

		ret = bmc150_accel_triggers_setup(indio_dev, data);
		if (ret)
			goto err_buffer_cleanup;
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto err_trigger_unregister;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, BMC150_AUTO_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	ret = iio_device_register(indio_dev);
	if (ret < 0) {
		dev_err(dev, "Unable to register iio device\n");
		goto err_trigger_unregister;
	}

	return 0;

err_trigger_unregister:
	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
err_buffer_cleanup:
	iio_triggered_buffer_cleanup(indio_dev);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
			       data->regulators);

	return ret;
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);

void bmc150_accel_core_remove(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);

	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);

	iio_triggered_buffer_cleanup(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
			       data->regulators);
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_remove);

#ifdef CONFIG_PM_SLEEP
static int bmc150_accel_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	bmc150_accel_fifo_set_mode(data);
	mutex_unlock(&data->mutex);

	if (data->resume_callback)
		data->resume_callback(dev);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int bmc150_accel_runtime_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	if (ret < 0)
		return -EAGAIN;

	return 0;
}

static int bmc150_accel_runtime_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;
	int sleep_val;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	if (ret < 0)
		return ret;
	ret = bmc150_accel_fifo_set_mode(data);
	if (ret < 0)
		return ret;

	sleep_val = bmc150_accel_get_startup_times(data);
	if (sleep_val < 20)
		usleep_range(sleep_val * 1000, 20000);
	else
		msleep_interruptible(sleep_val);

	return 0;
}
#endif

const struct dev_pm_ops bmc150_accel_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(bmc150_accel_suspend, bmc150_accel_resume)
	SET_RUNTIME_PM_OPS(bmc150_accel_runtime_suspend,
			   bmc150_accel_runtime_resume, NULL)
};
EXPORT_SYMBOL_GPL(bmc150_accel_pm_ops);

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BMC150 accelerometer driver");