/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);

static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};

static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
{
	return nvt->rdev->dev.parent;
}

static inline bool is_w83667hg(struct nvt_dev *nvt)
{
	return nvt->chip_ver == NVT_W83667HG;
}

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;
	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
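/*
 * Note: the EFER index/data port pair at cr_efir/cr_efdr is shared with the
 * other Super-I/O logical devices on the chip, which is why it is requested
 * as a muxed region below; other muxed users will wait until it is released.
 */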
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);

	release_region(nvt->cr_efir, 2);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}

/* select and enable a logical device, wrapping the access in EFM mode */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* select and disable a logical device, wrapping the access in EFM mode */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_addr + offset);
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_wake_addr + offset);
}

/* don't override io address if one is set already */
static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
{
	unsigned long old_addr;

	old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
	old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);

	if (old_addr)
		*ioaddr = old_addr;
	else {
		nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
		nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
	}
}

static void nvt_write_wakeup_codes(struct rc_dev *dev,
				   const u8 *wbuf, int count)
{
	u8 tolerance, config;
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	int i;

	/* hardcode the tolerance to 10% */
	tolerance = DIV_ROUND_UP(count, 10);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_clear_cir_wake_fifo(nvt);
	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
	nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* enable writes to wake fifo */
	nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
			       CIR_WAKE_IRCON);

	if (count)
		pr_info("Wake samples (%d) =", count);
	else
		pr_info("Wake sample fifo cleared");

	for (i = 0; i < count; i++)
		nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

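/*
 * sysfs "wakeup_data" attribute (typically /sys/class/rc/rcX/wakeup_data):
 * reading it returns the durations currently stored in the CIR WAKE FIFO as
 * space-separated microsecond values; writing it programs a new pattern in
 * the same format, e.g. (illustrative values only):
 *
 *   echo "500 500 1000 500" > wakeup_data
 *
 * Entries alternate pulse/space starting with a pulse, and each duration
 * must round to between 1 and 0x7f sample periods.
 */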
static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len,
				    "%d ", duration);
	}
	buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->lock, flags);

	return buf_len;
}

static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	u8 wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;
		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}
		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	nvt_write_wakeup_codes(rc_dev, wake_buf, count);

	ret = len;
out:
	argv_free(argv);
	return ret;
}
static DEVICE_ATTR_RW(wakeup_data);

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE :   0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM:   0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN:      0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON:   0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE :   0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM:   0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON:          0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS:          0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN:           0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP:  0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL:   0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT:     0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH:           0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL:           0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON:        0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS:        0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA:   0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY:   0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE:    0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM:          0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}

static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
		if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
			nvt->chip_ver = nvt_chips[i].chip_ver;
			return nvt_chips[i].name;
		}

	return NULL;
}


/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	struct device *dev = nvt_get_dev(nvt);
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
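	/*
	 * A chip-ID high byte of 0xff indicates nothing answered at the
	 * default EFER address pair, so fall back to the alternate
	 * CR_EFIR2/CR_EFDR2 ports and probe again.
	 */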
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (nvt->chip_major == 0xff) {
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(dev, "No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach, Packet End and RX FIFO Overrun interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
	u8 iren;

	iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
	nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(nvt_get_dev(nvt),
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif

static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
					struct rc_scancode_filter *sc_filter)
{
	u8 buf_val;
	int i, ret, count;
	unsigned int val;
	struct ir_raw_event *raw;
	u8 wake_buf[WAKEUP_MAX_SIZE];
	bool complete;

	/* Require mask to be set */
	if (!sc_filter->mask)
		return 0;

	raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
				     raw, WAKEUP_MAX_SIZE);
	complete = (ret != -ENOBUFS);
	if (!complete)
		ret = WAKEUP_MAX_SIZE;
	else if (ret < 0)
		goto out_raw;

	/* Inspect the ir samples */
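	/*
	 * Each encoded sample is converted to a count of SAMPLE_PERIOD units
	 * and then split into FIFO entries of at most BUF_LEN_MASK counts,
	 * with BUF_PULSE_BIT marking pulse (vs. space) entries.
	 */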
	for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
		val = raw[i].duration / SAMPLE_PERIOD;

		/* Split too large values into several smaller ones */
		while (val > 0 && count < WAKEUP_MAX_SIZE) {
			/* Skip last value for better comparison tolerance */
			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
				break;

			/* Clamp values to BUF_LEN_MASK at most */
			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;

			wake_buf[count] = buf_val;
			val -= buf_val;
			if ((raw[i]).pulse)
				wake_buf[count] |= BUF_PULSE_BIT;
			count++;
		}
	}

	nvt_write_wakeup_codes(dev, wake_buf, count);
	ret = 0;
out_raw:
	kfree(raw);

	return ret;
}

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the MSB is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time we trigger a decode operation.
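 *
 * For example, a sample byte of 0x85 (MSB set, length 5) encodes a pulse of
 * 5 * SAMPLE_PERIOD = 250us at the default 50us sample period, while 0x05
 * encodes a space of the same length.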
 */
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	struct ir_raw_event rawir = {};
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = (sample & BUF_LEN_MASK) * SAMPLE_PERIOD;

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
	}

	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}

static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_overflow(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	u8 fifocount;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++)
		nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);

	nvt->pkts = fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR	? " RDR"	: "",
		status & CIR_IRSTS_RTR	? " RTR"	: "",
		status & CIR_IRSTS_PE	? " PE"		: "",
		status & CIR_IRSTS_RFO	? " RFO"	: "",
		status & CIR_IRSTS_TE	? " TE"		: "",
		status & CIR_IRSTS_TTR	? " TTR"	: "",
		status & CIR_IRSTS_TFU	? " TFU"	: "",
		status & CIR_IRSTS_GH	? " GH"		: "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock(&nvt->lock);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name      - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE  - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE  - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH  - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* At least NCT6779D creates a spurious interrupt when the
	 * logical device is being disabled.
	 */
	if (status == 0xff && iren == 0xff) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("Spurious interrupt detected");
		return IRQ_HANDLED;
	}

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);
	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE))
		nvt_get_rx_ir_data(nvt);

	spin_unlock(&nvt->lock);

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}

static void nvt_enable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	spin_lock_irqsave(&nvt->lock, flags);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable the CIR logical device */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_enable_cir(nvt);

	return 0;
}

static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_disable_cir(nvt);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret;

	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return -ENOMEM;

	/* input device for IR remote */
	nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!nvt->rdev)
		return -ENOMEM;
	rdev = nvt->rdev;

	/* activate pnp device */
	ret = pnp_activate_dev(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		return ret;
	}

	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		return -EINVAL;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		return -EINVAL;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		return -EINVAL;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq  = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->lock);

	pnp_set_drvdata(pdev, nvt);

	ret = nvt_hw_detect(nvt);
	if (ret)
		return ret;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
	rdev->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
	rdev->encode_wakeup = true;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
	rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_US(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = CIR_SAMPLE_PERIOD;
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
#endif
	ret = devm_rc_register_device(&pdev->dev, rdev);
	if (ret)
		return ret;

	/* now claim resources */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		return -EBUSY;

	ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			       IRQF_SHARED, NVT_DRIVER_NAME, nvt);
	if (ret)
		return ret;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		return -EBUSY;

	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;
}

static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_disable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_enable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	return 0;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
1096  	{ "NTN0530", 0 },   /* CIR for new chip's pnp id*/
1097  	{ "", 0 },
1098  };
1099  
1100  static struct pnp_driver nvt_driver = {
1101  	.name		= NVT_DRIVER_NAME,
1102  	.id_table	= nvt_ids,
1103  	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
1104  	.probe		= nvt_probe,
1105  	.remove		= nvt_remove,
1106  	.suspend	= nvt_suspend,
1107  	.resume		= nvt_resume,
1108  	.shutdown	= nvt_shutdown,
1109  };
1110  
1111  module_param(debug, int, S_IRUGO | S_IWUSR);
1112  MODULE_PARM_DESC(debug, "Enable debugging output");
1113  
1114  MODULE_DEVICE_TABLE(pnp, nvt_ids);
1115  MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
1116  
1117  MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
1118  MODULE_LICENSE("GPL");
1119  
1120  module_pnp_driver(nvt_driver);
1121