// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Quadrature Encoder Peripheral driver
 *
 * Copyright (C) 2019-2021 Intel Corporation
 *
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
 * Author: Raymond Tan <raymond.tan@intel.com>
 */
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* MMIO register offsets, relative to the start of PCI BAR 0 */
#define INTEL_QEPCON			0x00
#define INTEL_QEPFLT			0x04
#define INTEL_QEPCOUNT			0x08
#define INTEL_QEPMAX			0x0c
#define INTEL_QEPWDT			0x10
#define INTEL_QEPCAPDIV			0x14
#define INTEL_QEPCNTR			0x18
#define INTEL_QEPCAPBUF			0x1c
#define INTEL_QEPINT_STAT		0x20
#define INTEL_QEPINT_MASK		0x24

/* QEPCON (control register) bits */
#define INTEL_QEPCON_EN			BIT(0)
#define INTEL_QEPCON_FLT_EN		BIT(1)
#define INTEL_QEPCON_EDGE_A		BIT(2)
#define INTEL_QEPCON_EDGE_B		BIT(3)
#define INTEL_QEPCON_EDGE_INDX		BIT(4)
#define INTEL_QEPCON_SWPAB		BIT(5)
#define INTEL_QEPCON_OP_MODE		BIT(6)
#define INTEL_QEPCON_PH_ERR		BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE	BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK	GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n)	(((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL	INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH	INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL	INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH	INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE		BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK	GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n)	((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY		BIT(15)

/* QEPFLT: noise-filter max count occupies the low 21 bits */
#define INTEL_QEPFLT_MAX_COUNT(n)	((n) & 0x1fffff)

/* QEPINT interrupt status/mask bits */
#define INTEL_QEPINT_FIFOCRIT		BIT(5)
#define INTEL_QEPINT_FIFOENTRY		BIT(4)
#define INTEL_QEPINT_QEPDIR		BIT(3)
#define INTEL_QEPINT_QEPRST_UP		BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN	BIT(1)
#define INTEL_QEPINT_WDT		BIT(0)

#define INTEL_QEPINT_MASK_ALL		GENMASK(5, 0)

/* Peripheral clock period in nanoseconds, used for filter conversions */
#define INTEL_QEP_CLK_PERIOD_NS		10

/*
 * struct intel_qep - driver-private state for one QEP instance
 * @counter:	counter subsystem device
 * @lock:	serializes @enabled and the configuration register updates
 *		done by the sysfs write callbacks below
 * @dev:	underlying PCI device (used for runtime PM calls)
 * @regs:	ioremapped BAR 0 registers
 * @enabled:	software shadow of the QEPCON EN bit
 */
struct intel_qep {
	struct counter_device counter;
	struct mutex lock;
	struct device *dev;
	void __iomem *regs;
	bool enabled;
	/* Context save registers (written by suspend, restored by resume) */
	u32 qepcon;
	u32 qepflt;
	u32 qepmax;
};

/* 32-bit MMIO read at @offset */
static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
{
	return readl(qep->regs + offset);
}

/* 32-bit MMIO write of @value at @offset */
static inline void intel_qep_writel(struct intel_qep *qep,
				    u32 offset, u32 value)
{
	writel(value, qep->regs + offset);
}

/*
 * Put the peripheral into a known default state: disabled, counting on
 * both edges of all three signals, filter off, count reset on index,
 * all interrupts masked.
 */
static void intel_qep_init(struct intel_qep *qep)
{
	u32 reg;

	reg = intel_qep_readl(qep, INTEL_QEPCON);
	reg &= ~INTEL_QEPCON_EN;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	qep->enabled = false;
	/*
	 * Make sure peripheral is disabled by flushing the write with
	 * a dummy read
	 */
	reg = intel_qep_readl(qep, INTEL_QEPCON);

	reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
	reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
	       INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
}

/* Read the current position count, with the device runtime-resumed */
static int intel_qep_count_read(struct counter_device *counter,
				struct counter_count *count, u64 *val)
{
	struct intel_qep *const qep = counter->priv;

	pm_runtime_get_sync(qep->dev);
	*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
	pm_runtime_put(qep->dev);

	return 0;
}

/* Only x4 quadrature decoding is exposed */
static const enum counter_function intel_qep_count_functions[] = {
	COUNTER_FUNCTION_QUADRATURE_X4,
};

static int intel_qep_function_read(struct counter_device *counter,
				   struct counter_count *count,
				   enum counter_function *function)
{
	*function = COUNTER_FUNCTION_QUADRATURE_X4;

	return 0;
}

/* All synapses trigger on both edges (see EDGE_A/B/INDX setup in init) */
static const enum counter_synapse_action intel_qep_synapse_actions[] = {
	COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};

static int intel_qep_action_read(struct counter_device *counter,
				 struct counter_count *count,
				 struct counter_synapse *synapse,
				 enum counter_synapse_action *action)
{
	*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
	return 0;
}

static const struct counter_ops intel_qep_counter_ops = {
	.count_read = intel_qep_count_read,
	.function_read = intel_qep_function_read,
	.action_read = intel_qep_action_read,
};

#define INTEL_QEP_SIGNAL(_id, _name) {				\
	.id = (_id),						\
	.name = (_name),					\
}

static struct counter_signal intel_qep_signals[] = {
	INTEL_QEP_SIGNAL(0, "Phase A"),
	INTEL_QEP_SIGNAL(1, "Phase B"),
	INTEL_QEP_SIGNAL(2, "Index"),
};

#define INTEL_QEP_SYNAPSE(_signal_id) {				\
	.actions_list = intel_qep_synapse_actions,		\
	.num_actions = ARRAY_SIZE(intel_qep_synapse_actions),	\
	.signal = &intel_qep_signals[(_signal_id)],		\
}

static struct counter_synapse intel_qep_count_synapses[] = {
	INTEL_QEP_SYNAPSE(0),
	INTEL_QEP_SYNAPSE(1),
	INTEL_QEP_SYNAPSE(2),
};

/* Report the counter ceiling from the QEPMAX register */
static int intel_qep_ceiling_read(struct counter_device *counter,
				  struct counter_count *count, u64 *ceiling)
{
	struct intel_qep *qep = counter->priv;

	pm_runtime_get_sync(qep->dev);
	*ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
	pm_runtime_put(qep->dev);

	return 0;
}

/*
 * Program a new counter ceiling. Rejected with -EBUSY while the counter
 * is enabled; configuration is only changed while the peripheral is off
 * (same policy as the other write callbacks below).
 */
static int intel_qep_ceiling_write(struct counter_device *counter,
				   struct counter_count *count, u64 max)
{
	struct intel_qep *qep = counter->priv;
	int ret = 0;

	/* Intel QEP ceiling configuration only supports 32-bit values */
	if (max != (u32)max)
		return -ERANGE;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	intel_qep_writel(qep, INTEL_QEPMAX, max);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);
	return ret;
}

/* Report the software shadow of the enable state */
static int intel_qep_enable_read(struct counter_device *counter,
				 struct counter_count *count, u8 *enable)
{
	struct intel_qep *qep = counter->priv;

	*enable = qep->enabled;

	return 0;
}

/*
 * Enable/disable the peripheral. While enabled, an extra runtime PM
 * usage reference is held (get_noresume) so the device cannot runtime
 * suspend and lose the running count; the matching put_noidle drops it
 * on disable.
 */
static int intel_qep_enable_write(struct counter_device *counter,
				  struct counter_count *count, u8 val)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;
	bool changed;

	mutex_lock(&qep->lock);
	changed = val ^ qep->enabled;
	if (!changed)
		goto out;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val) {
		/* Enable peripheral and keep runtime PM always on */
		reg |= INTEL_QEPCON_EN;
		pm_runtime_get_noresume(qep->dev);
	} else {
		/* Let runtime PM be idle and disable peripheral */
		pm_runtime_put_noidle(qep->dev);
		reg &= ~INTEL_QEPCON_EN;
	}
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	qep->enabled = val;

out:
	mutex_unlock(&qep->lock);
	return 0;
}

/*
 * Report the spike filter length in nanoseconds, or 0 when the filter
 * is disabled. Hardware filter length is (MAX_COUNT + 2) clock periods.
 */
static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
					  struct counter_count *count,
					  u64 *length)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (!(reg & INTEL_QEPCON_FLT_EN)) {
		pm_runtime_put(qep->dev);
		return 0;
	}
	reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
	pm_runtime_put(qep->dev);

	*length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;

	return 0;
}

static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
					   struct counter_count *count,
					   u64 length)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;
	bool enable;
	int ret = 0;

	/*
	 * Spike filter length is (MAX_COUNT + 2) clock periods.
	 * Disable filter when userspace writes 0, enable for valid
	 * nanoseconds values and error out otherwise.
	 */
	do_div(length, INTEL_QEP_CLK_PERIOD_NS);
	if (length == 0) {
		enable = false;
		length = 0;
	} else if (length >= 2) {
		enable = true;
		length -= 2;
	} else {
		return -EINVAL;
	}

	/* MAX_COUNT is a 21-bit field; reject values that would be masked */
	if (length > INTEL_QEPFLT_MAX_COUNT(length))
		return -ERANGE;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (enable)
		reg |= INTEL_QEPCON_FLT_EN;
	else
		reg &= ~INTEL_QEPCON_FLT_EN;
	intel_qep_writel(qep, INTEL_QEPFLT, length);
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);
	return ret;
}

/*
 * Preset-enable maps inversely onto COUNT_RST_MODE: when the bit is
 * clear the count resets (to the preset/ceiling behavior) on index.
 */
static int intel_qep_preset_enable_read(struct counter_device *counter,
					struct counter_count *count,
					u8 *preset_enable)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	pm_runtime_put(qep->dev);

	*preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);

	return 0;
}

static int intel_qep_preset_enable_write(struct counter_device *counter,
					 struct counter_count *count, u8 val)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;
	int ret = 0;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val)
		reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
	else
		reg |= INTEL_QEPCON_COUNT_RST_MODE;

	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);

	return ret;
}

/* Count extension attributes exposed through the counter sysfs ABI */
static struct counter_comp intel_qep_count_ext[] = {
	COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
	COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
	COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
				   intel_qep_preset_enable_write),
	COUNTER_COMP_COUNT_U64("spike_filter_ns",
			       intel_qep_spike_filter_ns_read,
			       intel_qep_spike_filter_ns_write),
};

static struct counter_count intel_qep_counter_count[] = {
	{
		.id = 0,
		.name = "Channel 1 Count",
		.functions_list = intel_qep_count_functions,
		.num_functions = ARRAY_SIZE(intel_qep_count_functions),
		.synapses = intel_qep_count_synapses,
		.num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
		.ext = intel_qep_count_ext,
		.num_ext = ARRAY_SIZE(intel_qep_count_ext),
	},
};

/*
 * Probe: map BAR 0, reset the peripheral to its default configuration,
 * fill in the counter device and register it. Uses managed (devm/pcim)
 * resources throughout, so there is no unwind ladder.
 */
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct intel_qep *qep;
	struct device *dev = &pci->dev;
	void __iomem *regs;
	int ret;

	qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
	if (!qep)
		return -ENOMEM;

	ret = pcim_enable_device(pci);
	if (ret)
		return ret;

	pci_set_master(pci);

	ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
	if (ret)
		return ret;

	regs = pcim_iomap_table(pci)[0];
	if (!regs)
		return -ENOMEM;

	qep->dev = dev;
	qep->regs = regs;
	mutex_init(&qep->lock);

	intel_qep_init(qep);
	pci_set_drvdata(pci, qep);

	qep->counter.name = pci_name(pci);
	qep->counter.parent = dev;
	qep->counter.ops = &intel_qep_counter_ops;
	qep->counter.counts = intel_qep_counter_count;
	qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
	qep->counter.signals = intel_qep_signals;
	qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
	qep->counter.priv = qep;
	qep->enabled = false;

	/*
	 * NOTE(review): drops the probe-time usage reference and lets the
	 * device runtime suspend when idle — presumably balancing the
	 * reference the PCI core takes for probe; confirm against the PCI
	 * runtime PM core behavior.
	 */
	pm_runtime_put(dev);
	pm_runtime_allow(dev);

	return devm_counter_register(&pci->dev, &qep->counter);
}
/*
 * Remove: forbid runtime PM so the device stays resumed, then disable
 * the peripheral with a final QEPCON write.
 */
static void intel_qep_remove(struct pci_dev *pci)
{
	struct intel_qep *qep = pci_get_drvdata(pci);
	struct device *dev = &pci->dev;

	pm_runtime_forbid(dev);
	/*
	 * If the counter is enabled, enable_write() already holds a
	 * usage reference (get_noresume); otherwise take one here so
	 * the reference count is balanced before the final disable.
	 */
	if (!qep->enabled)
		pm_runtime_get(dev);

	intel_qep_writel(qep, INTEL_QEPCON, 0);
}

/*
 * Save the configuration registers (QEPCON/QEPFLT/QEPMAX) across a
 * power transition. The count value itself is not saved.
 */
static int __maybe_unused intel_qep_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
	qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
	qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);

	return 0;
}

/*
 * Restore the saved register context. Order matters: the peripheral
 * must be disabled (and the disable flushed with a readback) before
 * restoring registers whose bits are writable only while disabled,
 * and the EN bit is restored last.
 */
static int __maybe_unused intel_qep_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	/*
	 * Make sure peripheral is disabled when restoring registers and
	 * control register bits that are writable only when the peripheral
	 * is disabled
	 */
	intel_qep_writel(qep, INTEL_QEPCON, 0);
	intel_qep_readl(qep, INTEL_QEPCON);

	intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
	intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);

	/* Restore all other control register bits except enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
	intel_qep_readl(qep, INTEL_QEPCON);

	/* Restore enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);

	return 0;
}

/* Same callbacks serve both system and runtime PM transitions */
static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
			    intel_qep_suspend, intel_qep_resume, NULL);

static const struct pci_device_id intel_qep_id_table[] = {
	/* EHL */
	{ PCI_VDEVICE(INTEL, 0x4bc3), },
	{ PCI_VDEVICE(INTEL, 0x4b81), },
	{ PCI_VDEVICE(INTEL, 0x4b82), },
	{ PCI_VDEVICE(INTEL, 0x4b83), },
	{  } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, intel_qep_id_table);

static struct pci_driver intel_qep_driver = {
	.name = "intel-qep",
	.id_table = intel_qep_id_table,
	.probe = intel_qep_probe,
	.remove = intel_qep_remove,
	.driver = {
		.pm = &intel_qep_pm_ops,
	}
};

module_pci_driver(intel_qep_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");