// SPDX-License-Identifier: GPL-2.0-only
/*
 * 3-axis accelerometer driver supporting many Bosch-Sensortec chips
 * Copyright (c) 2014, Intel Corporation.
 */

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#include "bmc150-accel.h"

#define BMC150_ACCEL_DRV_NAME			"bmc150_accel"
#define BMC150_ACCEL_IRQ_NAME			"bmc150_accel_event"

#define BMC150_ACCEL_REG_CHIP_ID		0x00

#define BMC150_ACCEL_REG_INT_STATUS_2		0x0B
#define BMC150_ACCEL_ANY_MOTION_MASK		0x07
#define BMC150_ACCEL_ANY_MOTION_BIT_X		BIT(0)
#define BMC150_ACCEL_ANY_MOTION_BIT_Y		BIT(1)
#define BMC150_ACCEL_ANY_MOTION_BIT_Z		BIT(2)
#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN	BIT(3)

#define BMC150_ACCEL_REG_PMU_LPW		0x11
#define BMC150_ACCEL_PMU_MODE_MASK		0xE0
#define BMC150_ACCEL_PMU_MODE_SHIFT		5
#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_MASK	0x17
#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT	1

#define BMC150_ACCEL_REG_PMU_RANGE		0x0F

#define BMC150_ACCEL_DEF_RANGE_2G		0x03
#define BMC150_ACCEL_DEF_RANGE_4G		0x05
#define BMC150_ACCEL_DEF_RANGE_8G		0x08
#define BMC150_ACCEL_DEF_RANGE_16G		0x0C

/* Default BW: 125Hz */
#define BMC150_ACCEL_REG_PMU_BW			0x10
#define BMC150_ACCEL_DEF_BW			125

#define BMC150_ACCEL_REG_RESET			0x14
#define BMC150_ACCEL_RESET_VAL			0xB6

#define BMC150_ACCEL_REG_INT_MAP_0		0x19
#define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE	BIT(2)

#define BMC150_ACCEL_REG_INT_MAP_1		0x1A
#define BMC150_ACCEL_INT_MAP_1_BIT_DATA		BIT(0)
#define BMC150_ACCEL_INT_MAP_1_BIT_FWM		BIT(1)
#define BMC150_ACCEL_INT_MAP_1_BIT_FFULL	BIT(2)

#define BMC150_ACCEL_REG_INT_RST_LATCH		0x21
#define BMC150_ACCEL_INT_MODE_LATCH_RESET	0x80
#define BMC150_ACCEL_INT_MODE_LATCH_INT		0x0F
#define BMC150_ACCEL_INT_MODE_NON_LATCH_INT	0x00

#define BMC150_ACCEL_REG_INT_EN_0		0x16
#define BMC150_ACCEL_INT_EN_BIT_SLP_X		BIT(0)
#define BMC150_ACCEL_INT_EN_BIT_SLP_Y		BIT(1)
#define BMC150_ACCEL_INT_EN_BIT_SLP_Z		BIT(2)

#define BMC150_ACCEL_REG_INT_EN_1		0x17
#define BMC150_ACCEL_INT_EN_BIT_DATA_EN		BIT(4)
#define BMC150_ACCEL_INT_EN_BIT_FFULL_EN	BIT(5)
#define BMC150_ACCEL_INT_EN_BIT_FWM_EN		BIT(6)

#define BMC150_ACCEL_REG_INT_OUT_CTRL		0x20
#define BMC150_ACCEL_INT_OUT_CTRL_INT1_LVL	BIT(0)

#define BMC150_ACCEL_REG_INT_5			0x27
#define BMC150_ACCEL_SLOPE_DUR_MASK		0x03

#define BMC150_ACCEL_REG_INT_6			0x28
#define BMC150_ACCEL_SLOPE_THRES_MASK		0xFF

/* Slope duration in terms of number of samples */
#define BMC150_ACCEL_DEF_SLOPE_DURATION		1
/* in terms of multiples of g's/LSB, based on range */
#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD	1

#define BMC150_ACCEL_REG_XOUT_L			0x02

#define BMC150_ACCEL_MAX_STARTUP_TIME_MS	100

/* Sleep Duration values */
#define BMC150_ACCEL_SLEEP_500_MICRO		0x05
#define BMC150_ACCEL_SLEEP_1_MS			0x06
#define BMC150_ACCEL_SLEEP_2_MS			0x07
#define BMC150_ACCEL_SLEEP_4_MS			0x08
#define BMC150_ACCEL_SLEEP_6_MS			0x09
#define BMC150_ACCEL_SLEEP_10_MS		0x0A
#define BMC150_ACCEL_SLEEP_25_MS		0x0B
#define BMC150_ACCEL_SLEEP_50_MS		0x0C
#define BMC150_ACCEL_SLEEP_100_MS		0x0D
#define BMC150_ACCEL_SLEEP_500_MS		0x0E
#define BMC150_ACCEL_SLEEP_1_SEC		0x0F

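/*
 * The sleep-duration codes above select the low-power sleep period via the
 * sleep-duration field of BMC150_ACCEL_REG_PMU_LPW; see bmc150_accel_set_mode()
 * and bmc150_accel_sleep_value_table below for the mapping from microseconds.
 */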

#define BMC150_ACCEL_REG_TEMP			0x08
#define BMC150_ACCEL_TEMP_CENTER_VAL		23

#define BMC150_ACCEL_AXIS_TO_REG(axis)	(BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS		2000

#define BMC150_ACCEL_REG_FIFO_STATUS		0x0E
#define BMC150_ACCEL_REG_FIFO_CONFIG0		0x30
#define BMC150_ACCEL_REG_FIFO_CONFIG1		0x3E
#define BMC150_ACCEL_REG_FIFO_DATA		0x3F
#define BMC150_ACCEL_FIFO_LENGTH		32

enum bmc150_accel_axis {
	AXIS_X,
	AXIS_Y,
	AXIS_Z,
	AXIS_MAX,
};

enum bmc150_power_modes {
	BMC150_ACCEL_SLEEP_MODE_NORMAL,
	BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND,
	BMC150_ACCEL_SLEEP_MODE_LPM,
	BMC150_ACCEL_SLEEP_MODE_SUSPEND = 0x04,
};

struct bmc150_scale_info {
	int scale;
	u8 reg_range;
};

struct bmc150_accel_chip_info {
	const char *name;
	u8 chip_id;
	const struct iio_chan_spec *channels;
	int num_channels;
	const struct bmc150_scale_info scale_table[4];
};

static const struct {
	int val;
	int val2;
	u8 bw_bits;
} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
				     {31, 260000, 0x09},
				     {62, 500000, 0x0A},
				     {125, 0, 0x0B},
				     {250, 0, 0x0C},
				     {500, 0, 0x0D},
				     {1000, 0, 0x0E},
				     {2000, 0, 0x0F} };

static const struct {
	int bw_bits;
	int msec;
} bmc150_accel_sample_upd_time[] = { {0x08, 64},
				     {0x09, 32},
				     {0x0A, 16},
				     {0x0B, 8},
				     {0x0C, 4},
				     {0x0D, 2},
				     {0x0E, 1},
				     {0x0F, 1} };

static const struct {
	int sleep_dur;
	u8 reg_value;
} bmc150_accel_sleep_value_table[] = { {0, 0},
				       {500, BMC150_ACCEL_SLEEP_500_MICRO},
				       {1000, BMC150_ACCEL_SLEEP_1_MS},
				       {2000, BMC150_ACCEL_SLEEP_2_MS},
				       {4000, BMC150_ACCEL_SLEEP_4_MS},
				       {6000, BMC150_ACCEL_SLEEP_6_MS},
				       {10000, BMC150_ACCEL_SLEEP_10_MS},
				       {25000, BMC150_ACCEL_SLEEP_25_MS},
				       {50000, BMC150_ACCEL_SLEEP_50_MS},
				       {100000, BMC150_ACCEL_SLEEP_100_MS},
				       {500000, BMC150_ACCEL_SLEEP_500_MS},
				       {1000000, BMC150_ACCEL_SLEEP_1_SEC} };

const struct regmap_config bmc150_regmap_conf = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x3f,
};
EXPORT_SYMBOL_GPL(bmc150_regmap_conf);

static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
				 enum bmc150_power_modes mode,
				 int dur_us)
{
	struct device *dev = regmap_get_device(data->regmap);
	int i;
	int ret;
	u8 lpw_bits;
	int dur_val = -1;

	if (dur_us > 0) {
		for (i = 0; i < ARRAY_SIZE(bmc150_accel_sleep_value_table);
									 ++i) {
			if (bmc150_accel_sleep_value_table[i].sleep_dur ==
									dur_us)
				dur_val =
				bmc150_accel_sleep_value_table[i].reg_value;
		}
	} else {
		dur_val = 0;
	}

	if (dur_val < 0)
		return -EINVAL;

	lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
	lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);

	dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_pmu_lpw\n");
		return ret;
	}

	return 0;
}

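/*
 * Select the output data rate: the requested frequency (val Hz plus val2 in
 * microhertz) must match an entry in bmc150_accel_samp_freq_table; the
 * corresponding bandwidth code is then written to BMC150_ACCEL_REG_PMU_BW.
 */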
static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
			       int val2)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
		if (bmc150_accel_samp_freq_table[i].val == val &&
		    bmc150_accel_samp_freq_table[i].val2 == val2) {
			ret = regmap_write(data->regmap,
				BMC150_ACCEL_REG_PMU_BW,
				bmc150_accel_samp_freq_table[i].bw_bits);
			if (ret < 0)
				return ret;

			data->bw_bits =
				bmc150_accel_samp_freq_table[i].bw_bits;
			return 0;
		}
	}

	return -EINVAL;
}

static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
			   data->slope_thres);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_int_6\n");
		return ret;
	}

	ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
				 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_5\n");
		return ret;
	}

	dev_dbg(dev, "%x %x\n", data->slope_thres, data->slope_dur);

	return ret;
}

static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
					 bool state)
{
	if (state)
		return bmc150_accel_update_slope(t->data);

	return 0;
}

static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
			       int *val2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
		if (bmc150_accel_samp_freq_table[i].bw_bits == data->bw_bits) {
			*val = bmc150_accel_samp_freq_table[i].val;
			*val2 = bmc150_accel_samp_freq_table[i].val2;
			return IIO_VAL_INT_PLUS_MICRO;
		}
	}

	return -EINVAL;
}

#ifdef CONFIG_PM
static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_sample_upd_time); ++i) {
		if (bmc150_accel_sample_upd_time[i].bw_bits == data->bw_bits)
			return bmc150_accel_sample_upd_time[i].msec;
	}

	return BMC150_ACCEL_MAX_STARTUP_TIME_MS;
}

static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	if (on) {
		ret = pm_runtime_resume_and_get(dev);
	} else {
		pm_runtime_mark_last_busy(dev);
		ret = pm_runtime_put_autosuspend(dev);
	}

	if (ret < 0) {
		dev_err(dev,
			"Failed: %s for %d\n", __func__, on);
		return ret;
	}

	return 0;
}
#else
static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
	return 0;
}
#endif

#ifdef CONFIG_ACPI
/*
 * Support for getting accelerometer information from BOSC0200 ACPI nodes.
 *
 * There are 2 variants of the BOSC0200 ACPI node. Some 2-in-1s with 360 degree
 * hinges declare 2 I2C ACPI-resources for 2 accelerometers, 1 in the display
 * and 1 in the base of the 2-in-1. On these 2-in-1s the ROMS ACPI object
 * contains the mount-matrix for the sensor in the display and ROMK contains
 * the mount-matrix for the sensor in the base. On devices using a single
 * sensor there is a ROTM ACPI object which contains the mount-matrix.
 *
 * Here is an incomplete list of devices known to use 1 of these setups:
 *
 * Yoga devices with 2 accelerometers using ROMS + ROMK for the mount-matrices:
 * Lenovo Thinkpad Yoga 11e 3rd gen
 * Lenovo Thinkpad Yoga 11e 4th gen
 *
 * Tablets using a single accelerometer using ROTM for the mount-matrix:
 * Chuwi Hi8 Pro (CWI513)
 * Chuwi Vi8 Plus (CWI519)
 * Chuwi Hi13
 * Irbis TW90
 * Jumper EZpad mini 3
 * Onda V80 plus
 * Predia Basic Tablet
 */
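/*
 * For illustration (an assumed example, matching the parsing below): a ROTM /
 * ROMS / ROMK object evaluates to an ACPI package of three strings, one row of
 * the mount matrix each, e.g. "1 0 0", "0 -1 0", "0 0 1", with every element
 * restricted to -1, 0 or 1.
 */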
static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
						   struct iio_mount_matrix *orientation)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct acpi_device *adev = ACPI_COMPANION(dev);
	char *name, *alt_name, *label, *str;
	union acpi_object *obj, *elements;
	acpi_status status;
	int i, j, val[3];

	if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) {
		alt_name = "ROMK";
		label = "accel-base";
	} else {
		alt_name = "ROMS";
		label = "accel-display";
	}

	if (acpi_has_method(adev->handle, "ROTM")) {
		name = "ROTM";
	} else if (acpi_has_method(adev->handle, alt_name)) {
		name = alt_name;
		indio_dev->label = label;
	} else {
		return false;
	}

	status = acpi_evaluate_object(adev->handle, name, NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		dev_warn(dev, "Failed to get ACPI mount matrix: %d\n", status);
		return false;
	}

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3)
		goto unknown_format;

	elements = obj->package.elements;
	for (i = 0; i < 3; i++) {
		if (elements[i].type != ACPI_TYPE_STRING)
			goto unknown_format;

		str = elements[i].string.pointer;
		if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3)
			goto unknown_format;

		for (j = 0; j < 3; j++) {
			switch (val[j]) {
			case -1: str = "-1"; break;
			case 0:  str = "0";  break;
			case 1:  str = "1";  break;
			default: goto unknown_format;
			}
			orientation->rotation[i * 3 + j] = str;
		}
	}

	kfree(buffer.pointer);
	return true;

unknown_format:
	dev_warn(dev, "Unknown ACPI mount matrix format, ignoring\n");
	kfree(buffer.pointer);
	return false;
}

static bool bmc150_apply_dual250e_acpi_orientation(struct device *dev,
						   struct iio_mount_matrix *orientation)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	if (strcmp(dev_name(dev), "i2c-DUAL250E:base") == 0)
		indio_dev->label = "accel-base";
	else
		indio_dev->label = "accel-display";

	return false; /* DUAL250E fwnodes have no mount matrix info */
}

static bool bmc150_apply_acpi_orientation(struct device *dev,
					  struct iio_mount_matrix *orientation)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (adev && acpi_dev_hid_uid_match(adev, "BOSC0200", NULL))
		return bmc150_apply_bosc0200_acpi_orientation(dev, orientation);

	if (adev && acpi_dev_hid_uid_match(adev, "DUAL250E", NULL))
		return bmc150_apply_dual250e_acpi_orientation(dev, orientation);

	return false;
}
#else
static bool bmc150_apply_acpi_orientation(struct device *dev,
					  struct iio_mount_matrix *orientation)
{
	return false;
}
#endif

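/*
 * One entry per interrupt source: each pairs the INT_MAP_* bit that routes the
 * source to the interrupt pin with the INT_EN_* bit(s) that enable it. The
 * table is indexed by the BMC150_ACCEL_INT_* constants from bmc150-accel.h and
 * consumed by bmc150_accel_set_interrupt().
 */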
static const struct bmc150_accel_interrupt_info {
	u8 map_reg;
	u8 map_bitmask;
	u8 en_reg;
	u8 en_bitmask;
} bmc150_accel_interrupts[BMC150_ACCEL_INTERRUPTS] = {
	{ /* data ready interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_DATA,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
	},
	{ /* motion interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_0,
		.map_bitmask = BMC150_ACCEL_INT_MAP_0_BIT_SLOPE,
		.en_reg = BMC150_ACCEL_REG_INT_EN_0,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
			BMC150_ACCEL_INT_EN_BIT_SLP_Y |
			BMC150_ACCEL_INT_EN_BIT_SLP_Z
	},
	{ /* fifo watermark interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_FWM,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
	},
};

static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
					  struct bmc150_accel_data *data)
{
	int i;

	for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
		data->interrupts[i].info = &bmc150_accel_interrupts[i];
}

static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
				      bool state)
{
	struct device *dev = regmap_get_device(data->regmap);
	struct bmc150_accel_interrupt *intr = &data->interrupts[i];
	const struct bmc150_accel_interrupt_info *info = intr->info;
	int ret;

	if (state) {
		if (atomic_inc_return(&intr->users) > 1)
			return 0;
	} else {
		if (atomic_dec_return(&intr->users) > 0)
			return 0;
	}

	/*
	 * We expect enable and disable to be called in reverse order. That
	 * happens here anyway, as our resume operation uses synchronous
	 * runtime-PM calls while the suspend operation is delayed by the
	 * autosuspend delay, so the disable still runs in reverse order of
	 * the enable. When runtime PM is disabled the device is always on,
	 * so the sequence doesn't matter.
	 */
	ret = bmc150_accel_set_power_state(data, state);
	if (ret < 0)
		return ret;

	/* map the interrupt to the appropriate pins */
	ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
				 (state ? info->map_bitmask : 0));
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_map\n");
		goto out_fix_power_state;
	}

	/* enable/disable the interrupt */
	ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
				 (state ? info->en_bitmask : 0));
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_en\n");
		goto out_fix_power_state;
	}

	return 0;

out_fix_power_state:
	bmc150_accel_set_power_state(data, false);
	return ret;
}

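/*
 * Scale values are expressed in micro m/s^2 per LSB (IIO_VAL_INT_PLUS_MICRO);
 * a requested value is matched against the per-chip scale_table and the
 * associated range code is written to BMC150_ACCEL_REG_PMU_RANGE.
 */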
static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
		if (data->chip_info->scale_table[i].scale == val) {
			ret = regmap_write(data->regmap,
				     BMC150_ACCEL_REG_PMU_RANGE,
				     data->chip_info->scale_table[i].reg_range);
			if (ret < 0) {
				dev_err(dev, "Error writing pmu_range\n");
				return ret;
			}

			data->range = data->chip_info->scale_table[i].reg_range;
			return 0;
		}
	}

	return -EINVAL;
}

static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;
	unsigned int value;

	mutex_lock(&data->mutex);

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_temp\n");
		mutex_unlock(&data->mutex);
		return ret;
	}
	*val = sign_extend32(value, 7);

	mutex_unlock(&data->mutex);

	return IIO_VAL_INT;
}

static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
				 struct iio_chan_spec const *chan,
				 int *val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;
	int axis = chan->scan_index;
	__le16 raw_val;

	mutex_lock(&data->mutex);
	ret = bmc150_accel_set_power_state(data, true);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
			       &raw_val, sizeof(raw_val));
	if (ret < 0) {
		dev_err(dev, "Error reading axis %d\n", axis);
		bmc150_accel_set_power_state(data, false);
		mutex_unlock(&data->mutex);
		return ret;
	}
	*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
			     chan->scan_type.realbits - 1);
	ret = bmc150_accel_set_power_state(data, false);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		return ret;

	return IIO_VAL_INT;
}

static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 int *val, int *val2, long mask)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		switch (chan->type) {
		case IIO_TEMP:
			return bmc150_accel_get_temp(data, val);
		case IIO_ACCEL:
			if (iio_buffer_enabled(indio_dev))
				return -EBUSY;
			else
				return bmc150_accel_get_axis(data, chan, val);
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		if (chan->type == IIO_TEMP) {
			*val = BMC150_ACCEL_TEMP_CENTER_VAL;
			return IIO_VAL_INT;
		} else {
			return -EINVAL;
		}
	case IIO_CHAN_INFO_SCALE:
		*val = 0;
		switch (chan->type) {
		case IIO_TEMP:
			*val2 = 500000;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_ACCEL:
		{
			int i;
			const struct bmc150_scale_info *si;
			int st_size = ARRAY_SIZE(data->chip_info->scale_table);

			for (i = 0; i < st_size; ++i) {
				si = &data->chip_info->scale_table[i];
				if (si->reg_range == data->range) {
					*val2 = si->scale;
					return IIO_VAL_INT_PLUS_MICRO;
				}
			}
			return -EINVAL;
		}
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = bmc150_accel_get_bw(data, val, val2);
		mutex_unlock(&data->mutex);
		return ret;
	default:
		return -EINVAL;
	}
}

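/*
 * For IIO_CHAN_INFO_SCALE writes the integer part must be 0 and val2 carries
 * the micro m/s^2 per LSB value from the scale_table; for
 * IIO_CHAN_INFO_SAMP_FREQ, val/val2 are Hz/microhertz as listed in
 * bmc150_accel_samp_freq_table.
 */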
static int bmc150_accel_write_raw(struct iio_dev *indio_dev,
				  struct iio_chan_spec const *chan,
				  int val, int val2, long mask)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = bmc150_accel_set_bw(data, val, val2);
		mutex_unlock(&data->mutex);
		break;
	case IIO_CHAN_INFO_SCALE:
		if (val)
			return -EINVAL;

		mutex_lock(&data->mutex);
		ret = bmc150_accel_set_scale(data, val2);
		mutex_unlock(&data->mutex);
		return ret;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int bmc150_accel_read_event(struct iio_dev *indio_dev,
				   const struct iio_chan_spec *chan,
				   enum iio_event_type type,
				   enum iio_event_direction dir,
				   enum iio_event_info info,
				   int *val, int *val2)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	*val2 = 0;
	switch (info) {
	case IIO_EV_INFO_VALUE:
		*val = data->slope_thres;
		break;
	case IIO_EV_INFO_PERIOD:
		*val = data->slope_dur;
		break;
	default:
		return -EINVAL;
	}

	return IIO_VAL_INT;
}

static int bmc150_accel_write_event(struct iio_dev *indio_dev,
				    const struct iio_chan_spec *chan,
				    enum iio_event_type type,
				    enum iio_event_direction dir,
				    enum iio_event_info info,
				    int val, int val2)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (data->ev_enable_state)
		return -EBUSY;

	switch (info) {
	case IIO_EV_INFO_VALUE:
		data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
		break;
	case IIO_EV_INFO_PERIOD:
		data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
					  const struct iio_chan_spec *chan,
					  enum iio_event_type type,
					  enum iio_event_direction dir)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return data->ev_enable_state;
}

static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
					   const struct iio_chan_spec *chan,
					   enum iio_event_type type,
					   enum iio_event_direction dir,
					   int state)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	if (state == data->ev_enable_state)
		return 0;

	mutex_lock(&data->mutex);

	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_ANY_MOTION,
					 state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	data->ev_enable_state = state;
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
					 struct iio_trigger *trig)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int i;

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		if (data->triggers[i].indio_trig == trig)
			return 0;
	}

	return -EINVAL;
}

static ssize_t bmc150_accel_get_fifo_watermark(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int wm;

	mutex_lock(&data->mutex);
	wm = data->watermark;
	mutex_unlock(&data->mutex);

	return sprintf(buf, "%d\n", wm);
}

static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	bool state;

	mutex_lock(&data->mutex);
	state = data->fifo_mode;
	mutex_unlock(&data->mutex);

	return sprintf(buf, "%d\n", state);
}

static const struct iio_mount_matrix *
bmc150_accel_get_mount_matrix(const struct iio_dev *indio_dev,
			      const struct iio_chan_spec *chan)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return &data->orientation;
}

static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
	IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmc150_accel_get_mount_matrix),
	{ }
};

static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
static IIO_CONST_ATTR(hwfifo_watermark_max,
		      __stringify(BMC150_ACCEL_FIFO_LENGTH));
static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
		       bmc150_accel_get_fifo_state, NULL, 0);
static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
		       bmc150_accel_get_fifo_watermark, NULL, 0);

static const struct attribute *bmc150_accel_fifo_attributes[] = {
	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
	NULL,
};

static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (val > BMC150_ACCEL_FIFO_LENGTH)
		val = BMC150_ACCEL_FIFO_LENGTH;

	mutex_lock(&data->mutex);
	data->watermark = val;
	mutex_unlock(&data->mutex);

	return 0;
}

/*
 * We must read at least one full frame in one burst, otherwise the rest of the
 * frame data is discarded.
 */
static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
				      char *buffer, int samples)
{
	struct device *dev = regmap_get_device(data->regmap);
	int sample_length = 3 * 2;
	int ret;
	int total_length = samples * sample_length;

	ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
			      buffer, total_length);
	if (ret)
		dev_err(dev,
			"Error transferring data from fifo: %d\n", ret);

	return ret;
}

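/*
 * Each FIFO frame holds one sample per axis: three 16-bit little-endian
 * values, i.e. 6 bytes per frame, burst-read from BMC150_ACCEL_REG_FIFO_DATA.
 * The low 7 bits of BMC150_ACCEL_REG_FIFO_STATUS report how many frames are
 * pending.
 */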
static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
				     unsigned samples, bool irq)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;
	u8 count;
	u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
	int64_t tstamp;
	uint64_t sample_period;
	unsigned int val;

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_fifo_status\n");
		return ret;
	}

	count = val & 0x7F;

	if (!count)
		return 0;

	/*
	 * If we are called from the IRQ handler we know the stored timestamp
	 * is fairly accurate for the last stored sample. Otherwise, if we are
	 * called as a result of a read operation from userspace and hence
	 * before the watermark interrupt was triggered, take a timestamp now.
	 * We can fall anywhere in between two samples so the error in this
	 * case is at most one sample period.
	 */
	if (!irq) {
		data->old_timestamp = data->timestamp;
		data->timestamp = iio_get_time_ns(indio_dev);
	}

	/*
	 * Approximate timestamps for each of the samples based on the
	 * sampling frequency, the timestamp of the last sample and the number
	 * of samples.
	 *
	 * Note that we can't use the current bandwidth settings to compute
	 * the sample period because the sample rate varies with the device
	 * (e.g. between 31.70ms to 32.20ms for a bandwidth of 15.63HZ). That
	 * small variation adds up when we store a large number of samples and
	 * creates significant jitter between the last and first samples in
	 * different batches (e.g. 32ms vs 21ms).
	 *
	 * To avoid this issue we compute the actual sample period ourselves
	 * based on the timestamp delta between the last two flush operations.
	 */
	sample_period = (data->timestamp - data->old_timestamp);
	do_div(sample_period, count);
	tstamp = data->timestamp - (count - 1) * sample_period;

	if (samples && count > samples)
		count = samples;

	ret = bmc150_accel_fifo_transfer(data, (u8 *)buffer, count);
	if (ret)
		return ret;

	/*
	 * Ideally we want the IIO core to handle the demux when running in
	 * fifo mode but not when running in triggered buffer mode.
	 * Unfortunately this does not seem to be possible, so stick with
	 * driver demux for now.
	 */
	for (i = 0; i < count; i++) {
		int j, bit;

		j = 0;
		for_each_set_bit(bit, indio_dev->active_scan_mask,
				 indio_dev->masklength)
			memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
			       sizeof(data->scan.channels[0]));

		iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
						   tstamp);

		tstamp += sample_period;
	}

	return count;
}

static int bmc150_accel_fifo_flush(struct iio_dev *indio_dev, unsigned samples)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = __bmc150_accel_fifo_flush(indio_dev, samples, false);
	mutex_unlock(&data->mutex);

	return ret;
}

static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
		"15.620000 31.260000 62.50000 125 250 500 1000 2000");

static struct attribute *bmc150_accel_attributes[] = {
	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group bmc150_accel_attrs_group = {
	.attrs = bmc150_accel_attributes,
};

static const struct iio_event_spec bmc150_accel_event = {
	.type = IIO_EV_TYPE_ROC,
	.dir = IIO_EV_DIR_EITHER,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			 BIT(IIO_EV_INFO_ENABLE) |
			 BIT(IIO_EV_INFO_PERIOD)
};

#define BMC150_ACCEL_CHANNEL(_axis, bits) {				\
	.type = IIO_ACCEL,						\
	.modified = 1,							\
	.channel2 = IIO_MOD_##_axis,					\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
	.scan_index = AXIS_##_axis,					\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = (bits),					\
		.storagebits = 16,					\
		.shift = 16 - (bits),					\
		.endianness = IIO_LE,					\
	},								\
	.ext_info = bmc150_accel_ext_info,				\
	.event_spec = &bmc150_accel_event,				\
	.num_event_specs = 1						\
}

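/*
 * Acceleration samples are left-justified in the 16-bit data registers, hence
 * .shift = 16 - realbits above and the matching down-shift in
 * bmc150_accel_get_axis().
 */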
#define BMC150_ACCEL_CHANNELS(bits) {					\
	{								\
		.type = IIO_TEMP,					\
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
				      BIT(IIO_CHAN_INFO_SCALE) |	\
				      BIT(IIO_CHAN_INFO_OFFSET),	\
		.scan_index = -1,					\
	},								\
	BMC150_ACCEL_CHANNEL(X, bits),					\
	BMC150_ACCEL_CHANNEL(Y, bits),					\
	BMC150_ACCEL_CHANNEL(Z, bits),					\
	IIO_CHAN_SOFT_TIMESTAMP(3),					\
}

static const struct iio_chan_spec bma222e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(8);
static const struct iio_chan_spec bma250e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(10);
static const struct iio_chan_spec bmc150_accel_channels[] =
	BMC150_ACCEL_CHANNELS(12);
static const struct iio_chan_spec bma280_accel_channels[] =
	BMC150_ACCEL_CHANNELS(14);

/*
 * The range for the Bosch sensors is typically +-2g/4g/8g/16g, distributed
 * over the amount of bits (see above). The scale table can be calculated using
 *     (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
 * e.g. for +-2g and 12 bits: (4 / 2^12) * 9.80665 m/s^2 = 0.0095768... m/s^2
 * Multiply by 10^6 and round to get the values listed below.
 */
static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
	{
		.name = "BMA222",
		.chip_id = 0x03,
		.channels = bma222e_accel_channels,
		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
		.scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
				 {306458, BMC150_ACCEL_DEF_RANGE_4G},
				 {612916, BMC150_ACCEL_DEF_RANGE_8G},
				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA222E",
		.chip_id = 0xF8,
		.channels = bma222e_accel_channels,
		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
		.scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
				 {306458, BMC150_ACCEL_DEF_RANGE_4G},
				 {612916, BMC150_ACCEL_DEF_RANGE_8G},
				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA250E",
		.chip_id = 0xF9,
		.channels = bma250e_accel_channels,
		.num_channels = ARRAY_SIZE(bma250e_accel_channels),
		.scale_table = { {38307, BMC150_ACCEL_DEF_RANGE_2G},
				 {76614, BMC150_ACCEL_DEF_RANGE_4G},
				 {153229, BMC150_ACCEL_DEF_RANGE_8G},
				 {306458, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA253/BMA254/BMA255/BMC150/BMI055",
		.chip_id = 0xFA,
		.channels = bmc150_accel_channels,
		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
		.scale_table = { {9577, BMC150_ACCEL_DEF_RANGE_2G},
				 {19154, BMC150_ACCEL_DEF_RANGE_4G},
				 {38307, BMC150_ACCEL_DEF_RANGE_8G},
				 {76614, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA280",
		.chip_id = 0xFB,
		.channels = bma280_accel_channels,
		.num_channels = ARRAY_SIZE(bma280_accel_channels),
		.scale_table = { {2394, BMC150_ACCEL_DEF_RANGE_2G},
				 {4788, BMC150_ACCEL_DEF_RANGE_4G},
				 {9577, BMC150_ACCEL_DEF_RANGE_8G},
				 {19154, BMC150_ACCEL_DEF_RANGE_16G} },
	},
};

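/*
 * Two sets of IIO callbacks: bmc150_accel_info is used when no hardware FIFO
 * ("block") support is available, while bmc150_accel_info_fifo additionally
 * wires up trigger validation and the hwfifo watermark/flush hooks; see
 * bmc150_accel_core_probe().
 */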
static const struct iio_info bmc150_accel_info = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
};

static const struct iio_info bmc150_accel_info_fifo = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
	.validate_trigger = bmc150_accel_validate_trigger,
	.hwfifo_set_watermark = bmc150_accel_set_watermark,
	.hwfifo_flush_to_buffer = bmc150_accel_fifo_flush,
};

static const unsigned long bmc150_accel_scan_masks[] = {
					BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
					0};

static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
			       data->buffer, AXIS_MAX * 2);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		goto err_read;

	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
					   pf->timestamp);
err_read:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

static void bmc150_accel_trig_reen(struct iio_trigger *trig)
{
	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
	struct bmc150_accel_data *data = t->data;
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	/* new data interrupts don't need ack */
	if (t == &t->data->triggers[BMC150_ACCEL_TRIGGER_DATA_READY])
		return;

	mutex_lock(&data->mutex);
	/* clear any latched interrupt */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
			   BMC150_ACCEL_INT_MODE_LATCH_INT |
			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		dev_err(dev, "Error writing reg_int_rst_latch\n");
}

static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
					  bool state)
{
	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
	struct bmc150_accel_data *data = t->data;
	int ret;

	mutex_lock(&data->mutex);

	if (t->enabled == state) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	if (t->setup) {
		ret = t->setup(t, state);
		if (ret < 0) {
			mutex_unlock(&data->mutex);
			return ret;
		}
	}

	ret = bmc150_accel_set_interrupt(data, t->intr, state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	t->enabled = state;

	mutex_unlock(&data->mutex);

	return ret;
}

static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
	.set_trigger_state = bmc150_accel_trigger_set_state,
	.reenable = bmc150_accel_trig_reen,
};

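/*
 * INT_STATUS_2 reports which axes crossed the slope (any-motion) threshold,
 * and its sign bit selects falling vs. rising direction; translate that into
 * IIO rate-of-change events for the affected axes.
 */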
static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	int dir;
	int ret;
	unsigned int val;

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_int_status_2\n");
		return ret;
	}

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_SIGN)
		dir = IIO_EV_DIR_FALLING;
	else
		dir = IIO_EV_DIR_RISING;

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_X)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_X,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Y)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_Y,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Z)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_Z,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	return ret;
}

static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	bool ack = false;
	int ret;

	mutex_lock(&data->mutex);

	if (data->fifo_mode) {
		ret = __bmc150_accel_fifo_flush(indio_dev,
						BMC150_ACCEL_FIFO_LENGTH, true);
		if (ret > 0)
			ack = true;
	}

	if (data->ev_enable_state) {
		ret = bmc150_accel_handle_roc_event(indio_dev);
		if (ret > 0)
			ack = true;
	}

	if (ack) {
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_INT |
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret)
			dev_err(dev, "Error writing reg_int_rst_latch\n");

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	mutex_unlock(&data->mutex);

	return ret;
}

static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	bool ack = false;
	int i;

	data->old_timestamp = data->timestamp;
	data->timestamp = iio_get_time_ns(indio_dev);

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		if (data->triggers[i].enabled) {
			iio_trigger_poll(data->triggers[i].indio_trig);
			ack = true;
			break;
		}
	}

	if (data->ev_enable_state || data->fifo_mode)
		return IRQ_WAKE_THREAD;

	if (ack)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

static const struct {
	int intr;
	const char *name;
	int (*setup)(struct bmc150_accel_trigger *t, bool state);
} bmc150_accel_triggers[BMC150_ACCEL_TRIGGERS] = {
	{
		.intr = 0,
		.name = "%s-dev%d",
	},
	{
		.intr = 1,
		.name = "%s-any-motion-dev%d",
		.setup = bmc150_accel_any_motion_setup,
	},
};

static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
					     int from)
{
	int i;

	for (i = from; i >= 0; i--) {
		if (data->triggers[i].indio_trig) {
			iio_trigger_unregister(data->triggers[i].indio_trig);
			data->triggers[i].indio_trig = NULL;
		}
	}
}

static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
				       struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int i, ret;

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		struct bmc150_accel_trigger *t = &data->triggers[i];

		t->indio_trig = devm_iio_trigger_alloc(dev,
						       bmc150_accel_triggers[i].name,
						       indio_dev->name,
						       iio_device_id(indio_dev));
		if (!t->indio_trig) {
			ret = -ENOMEM;
			break;
		}

		t->indio_trig->ops = &bmc150_accel_trigger_ops;
		t->intr = bmc150_accel_triggers[i].intr;
		t->data = data;
		t->setup = bmc150_accel_triggers[i].setup;
		iio_trigger_set_drvdata(t->indio_trig, t);

		ret = iio_trigger_register(t->indio_trig);
		if (ret)
			break;
	}

	if (ret)
		bmc150_accel_unregister_triggers(data, i - 1);

	return ret;
}

#define BMC150_ACCEL_FIFO_MODE_STREAM		0x80
#define BMC150_ACCEL_FIFO_MODE_FIFO		0x40
#define BMC150_ACCEL_FIFO_MODE_BYPASS		0x00

static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
	int ret;

	ret = regmap_write(data->regmap, reg, data->fifo_mode);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_fifo_config1\n");
		return ret;
	}

	if (!data->fifo_mode)
		return 0;

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
			   data->watermark);
	if (ret < 0)
		dev_err(dev, "Error writing reg_fifo_config0\n");

	return ret;
}

static int bmc150_accel_buffer_preenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return bmc150_accel_set_power_state(data, true);
}

static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret = 0;

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
		return 0;

	mutex_lock(&data->mutex);

	if (!data->watermark)
		goto out;

	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					 true);
	if (ret)
		goto out;

	data->fifo_mode = BMC150_ACCEL_FIFO_MODE_FIFO;

	ret = bmc150_accel_fifo_set_mode(data);
	if (ret) {
		data->fifo_mode = 0;
		bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					   false);
	}

out:
	mutex_unlock(&data->mutex);

	return ret;
}

static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
		return 0;

	mutex_lock(&data->mutex);

	if (!data->fifo_mode)
		goto out;

	bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK, false);
	__bmc150_accel_fifo_flush(indio_dev, BMC150_ACCEL_FIFO_LENGTH, false);
	data->fifo_mode = 0;
	bmc150_accel_fifo_set_mode(data);

out:
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_buffer_postdisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return bmc150_accel_set_power_state(data, false);
}

static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
	.preenable = bmc150_accel_buffer_preenable,
	.postenable = bmc150_accel_buffer_postenable,
	.predisable = bmc150_accel_buffer_predisable,
	.postdisable = bmc150_accel_buffer_postdisable,
};

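/*
 * Basic bring-up sequence: soft reset, match the chip ID against
 * bmc150_accel_chip_info_tbl, then program normal power mode, the default
 * bandwidth and +/-4g range, the default slope parameters and latched
 * interrupt mode.
 */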
static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;
	unsigned int val;

	/*
	 * Reset chip to get it in a known good state. A delay of 1.8ms after
	 * reset is required according to the data sheets of supported chips.
	 */
	regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
		     BMC150_ACCEL_RESET_VAL);
	usleep_range(1800, 2500);

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
	if (ret < 0) {
		dev_err(dev, "Error: Reading chip id\n");
		return ret;
	}

	dev_dbg(dev, "Chip Id %x\n", val);
	for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
		if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
			data->chip_info = &bmc150_accel_chip_info_tbl[i];
			break;
		}
	}

	if (!data->chip_info) {
		dev_err(dev, "Invalid chip %x\n", val);
		return -ENODEV;
	}

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	if (ret < 0)
		return ret;

	/* Set Bandwidth */
	ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
	if (ret < 0)
		return ret;

	/* Set Default Range */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
			   BMC150_ACCEL_DEF_RANGE_4G);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_pmu_range\n");
		return ret;
	}

	data->range = BMC150_ACCEL_DEF_RANGE_4G;

	/* Set default slope duration and thresholds */
	data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
	data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
	ret = bmc150_accel_update_slope(data);
	if (ret < 0)
		return ret;

	/* Set default as latched interrupts */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
			   BMC150_ACCEL_INT_MODE_LATCH_INT |
			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_int_rst_latch\n");
		return ret;
	}

	return 0;
}

int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
			    const char *name, bool block_supported)
{
	const struct attribute **fifo_attrs;
	struct bmc150_accel_data *data;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	dev_set_drvdata(dev, indio_dev);

	data->regmap = regmap;

	if (!bmc150_apply_acpi_orientation(dev, &data->orientation)) {
		ret = iio_read_mount_matrix(dev, &data->orientation);
		if (ret)
			return ret;
	}

	/*
	 * VDD is the analog and digital domain voltage supply
	 * VDDIO is the digital I/O voltage supply
	 */
	data->regulators[0].supply = "vdd";
	data->regulators[1].supply = "vddio";
	ret = devm_regulator_bulk_get(dev,
				      ARRAY_SIZE(data->regulators),
				      data->regulators);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get regulators\n");

	ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
				    data->regulators);
	if (ret) {
		dev_err(dev, "failed to enable regulators: %d\n", ret);
		return ret;
	}
	/*
	 * 2ms or 3ms power-on time according to the datasheets; be safe
	 * rather than sorry and use a 5ms delay.
	 */
	msleep(5);

	ret = bmc150_accel_chip_init(data);
	if (ret < 0)
		goto err_disable_regulators;

	mutex_init(&data->mutex);

	indio_dev->channels = data->chip_info->channels;
	indio_dev->num_channels = data->chip_info->num_channels;
	indio_dev->name = name ? name : data->chip_info->name;
	indio_dev->available_scan_masks = bmc150_accel_scan_masks;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &bmc150_accel_info;

	if (block_supported) {
		indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
		indio_dev->info = &bmc150_accel_info_fifo;
		fifo_attrs = bmc150_accel_fifo_attributes;
	} else {
		fifo_attrs = NULL;
	}

	ret = iio_triggered_buffer_setup_ext(indio_dev,
					     &iio_pollfunc_store_time,
					     bmc150_accel_trigger_handler,
					     &bmc150_accel_buffer_ops,
					     fifo_attrs);
	if (ret < 0) {
		dev_err(dev, "Failed: iio triggered buffer setup\n");
		goto err_disable_regulators;
	}

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						bmc150_accel_irq_handler,
						bmc150_accel_irq_thread_handler,
						IRQF_TRIGGER_RISING,
						BMC150_ACCEL_IRQ_NAME,
						indio_dev);
		if (ret)
			goto err_buffer_cleanup;

		/*
		 * Set latched mode interrupt. While certain interrupts are
		 * non-latched regardless of this setting (e.g. new data) we
		 * want to use latch mode when we can to prevent interrupt
		 * flooding.
		 */
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret < 0) {
			dev_err(dev, "Error writing reg_int_rst_latch\n");
			goto err_buffer_cleanup;
		}

		bmc150_accel_interrupts_setup(indio_dev, data);

		ret = bmc150_accel_triggers_setup(indio_dev, data);
		if (ret)
			goto err_buffer_cleanup;
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto err_trigger_unregister;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, BMC150_AUTO_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	ret = iio_device_register(indio_dev);
	if (ret < 0) {
		dev_err(dev, "Unable to register iio device\n");
		goto err_trigger_unregister;
	}

	return 0;

err_trigger_unregister:
	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
err_buffer_cleanup:
	iio_triggered_buffer_cleanup(indio_dev);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
			       data->regulators);

	return ret;
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);

int bmc150_accel_core_remove(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);

	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);

	iio_triggered_buffer_cleanup(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
			       data->regulators);

	return 0;
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_remove);

#ifdef CONFIG_PM_SLEEP
static int bmc150_accel_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	bmc150_accel_fifo_set_mode(data);
	mutex_unlock(&data->mutex);

	if (data->resume_callback)
		data->resume_callback(dev);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int bmc150_accel_runtime_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	if (ret < 0)
		return -EAGAIN;

	return 0;
}

static int bmc150_accel_runtime_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;
	int sleep_val;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	if (ret < 0)
		return ret;
	ret = bmc150_accel_fifo_set_mode(data);
	if (ret < 0)
		return ret;

	sleep_val = bmc150_accel_get_startup_times(data);
	if (sleep_val < 20)
		usleep_range(sleep_val * 1000, 20000);
	else
		msleep_interruptible(sleep_val);

	return 0;
}
#endif

const struct dev_pm_ops bmc150_accel_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(bmc150_accel_suspend, bmc150_accel_resume)
	SET_RUNTIME_PM_OPS(bmc150_accel_runtime_suspend,
			   bmc150_accel_runtime_resume, NULL)
};
EXPORT_SYMBOL_GPL(bmc150_accel_pm_ops);

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BMC150 accelerometer driver");