/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);

static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};

static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
{
	return nvt->rdev->dev.parent;
}

static inline bool is_w83667hg(struct nvt_dev *nvt)
{
	return nvt->chip_ver == NVT_W83667HG;
}

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;
	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);

	release_region(nvt->cr_efir, 2);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}

/* select and enable logical device (enters and exits EFM mode) */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* select and disable logical device (enters and exits EFM mode) */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_addr + offset);
}

/* write val to cir wake config register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_wake_addr + offset);
}

/* don't override io address if one is set already */
static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
{
	unsigned long old_addr;

	old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
	old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);

	if (old_addr)
		*ioaddr = old_addr;
	else {
		nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
		nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
	}
}

static void nvt_write_wakeup_codes(struct rc_dev *dev,
				   const u8 *wbuf, int count)
{
	u8 tolerance, config;
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	int i;

	/* hardcode the tolerance to 10% */
	tolerance = DIV_ROUND_UP(count, 10);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_clear_cir_wake_fifo(nvt);
	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
	nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* enable writes to wake fifo */
	nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
			       CIR_WAKE_IRCON);

	if (count)
		pr_info("Wake samples (%d) =", count);
	else
		pr_info("Wake sample fifo cleared");

	for (i = 0; i < count; i++)
		nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len,
				     "%d ", duration);
	}
	buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->lock, flags);

	return buf_len;
}

static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	u8 wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;
		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}
		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	nvt_write_wakeup_codes(rc_dev, wake_buf, count);

	ret = len;
out:
	argv_free(argv);
	return ret;
}
static DEVICE_ATTR_RW(wakeup_data);

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}

static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
		if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
			nvt->chip_ver = nvt_chips[i].chip_ver;
			return nvt_chips[i].name;
		}

	return NULL;
}

/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	struct device *dev = nvt_get_dev(nvt);
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (nvt->chip_major == 0xff) {
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(dev, "No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach and Packet End interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
	u8 iren;

	iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
	nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(nvt_get_dev(nvt),
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif

static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
					struct rc_scancode_filter *sc_filter)
{
	u8 buf_val;
	int i, ret, count;
	unsigned int val;
	struct ir_raw_event *raw;
	u8 wake_buf[WAKEUP_MAX_SIZE];
	bool complete;

	/* Require mask to be set */
	if (!sc_filter->mask)
		return 0;

	raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
				     raw, WAKEUP_MAX_SIZE);
	complete = (ret != -ENOBUFS);
	if (!complete)
		ret = WAKEUP_MAX_SIZE;
	else if (ret < 0)
		goto out_raw;

	/* Inspect the ir samples */
	for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
		/* NS to US */
		val = DIV_ROUND_UP(raw[i].duration, 1000L) / SAMPLE_PERIOD;

		/* Split too large values into several smaller ones */
		while (val > 0 && count < WAKEUP_MAX_SIZE) {
			/* Skip last value for better comparison tolerance */
			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
				break;

			/* Clamp values to BUF_LEN_MASK at most */
			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;

			wake_buf[count] = buf_val;
			val -= buf_val;
			if ((raw[i]).pulse)
				wake_buf[count] |= BUF_PULSE_BIT;
			count++;
		}
	}

	nvt_write_wakeup_codes(dev, wake_buf, count);
	ret = 0;
out_raw:
	kfree(raw);

	return ret;
}

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. For example, a sample byte
 * of 0x8a is a pulse lasting 10 * 50us = 500us. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time we trigger a decode operation.
 */
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	struct ir_raw_event rawir = {};
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
					  * SAMPLE_PERIOD);

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
	}

	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}

static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	u8 fifocount;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++)
		nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);

	nvt->pkts = fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR ? " RDR" : "",
		status & CIR_IRSTS_RTR ? " RTR" : "",
		status & CIR_IRSTS_PE ? " PE" : "",
		status & CIR_IRSTS_RFO ? " RFO" : "",
		status & CIR_IRSTS_TE ? " TE" : "",
		status & CIR_IRSTS_TTR ? " TTR" : "",
		status & CIR_IRSTS_TFU ? " TFU" : "",
		status & CIR_IRSTS_GH ? " GH" : "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock(&nvt->lock);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* At least NCT6779D creates a spurious interrupt when the
	 * logical device is being disabled.
	 */
	if (status == 0xff && iren == 0xff) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("Spurious interrupt detected");
		return IRQ_HANDLED;
	}

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);
	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE))
		nvt_get_rx_ir_data(nvt);

	spin_unlock(&nvt->lock);

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}

static void nvt_enable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	spin_lock_irqsave(&nvt->lock, flags);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable the CIR logical device */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_enable_cir(nvt);

	return 0;
}

static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_disable_cir(nvt);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret;

	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return -ENOMEM;

	/* input device for IR remote */
	nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!nvt->rdev)
		return -ENOMEM;
	rdev = nvt->rdev;

	/* activate pnp device */
	ret = pnp_activate_dev(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		return ret;
	}

	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		return -EINVAL;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		return -EINVAL;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		return -EINVAL;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->lock);

	pnp_set_drvdata(pdev, nvt);

	ret = nvt_hw_detect(nvt);
	if (ret)
		return ret;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
	rdev->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
	rdev->encode_wakeup = true;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
	rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_NS(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
#endif
	ret = devm_rc_register_device(&pdev->dev, rdev);
	if (ret)
		return ret;

	/* now claim resources */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		return -EBUSY;

	ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			       IRQF_SHARED, NVT_DRIVER_NAME, nvt);
	if (ret)
		return ret;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		return -EBUSY;

	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;
}

static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_disable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_enable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	return 0;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id */
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name = NVT_DRIVER_NAME,
	.id_table = nvt_ids,
	.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe = nvt_probe,
	.remove = nvt_remove,
	.suspend = nvt_suspend,
	.resume = nvt_resume,
	.shutdown = nvt_shutdown,
};

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_pnp_driver(nvt_driver);
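
/*
 * Illustrative sketch only, kept under #if 0 (not built), in the same way
 * as the unused carrier-detect helper above: it shows how a single wake
 * FIFO sample byte is encoded, mirroring wakeup_data_store() and
 * nvt_ir_raw_set_wakeup_filter(). The helper name below is hypothetical
 * and is not part of the driver.
 */
#if 0
static u8 nvt_encode_wake_sample(unsigned int duration_us, bool pulse)
{
	/* duration is stored as a count of SAMPLE_PERIOD (50us) intervals */
	unsigned int val = DIV_ROUND_CLOSEST(duration_us, SAMPLE_PERIOD);

	/* the length field is only 7 bits wide; clamp like the filter code */
	if (val > BUF_LEN_MASK)
		val = BUF_LEN_MASK;

	/* the msb marks a pulse, a cleared msb marks a space */
	return pulse ? (val | BUF_PULSE_BIT) : val;
}
#endif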