/*
 * Intel MID Power Management Unit (PWRMU) device driver
 *
 * Copyright (C) 2016, Intel Corporation
 *
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Intel MID Power Management Unit device driver handles the South Complex PCI
 * devices such as GPDMA, SPI, I2C, PWM, and so on. By default PCI core
 * modifies bits in PMCSR register in the PCI configuration space. This is not
 * enough on some SoCs like Intel Tangier. In such case PCI core sets a new
 * power state of the device in question through a PM hook registered in struct
 * pci_platform_pm_ops (see drivers/pci/pci-mid.c).
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>

#include <asm/intel-mid.h>

/* Registers */
#define PM_STS			0x00
#define PM_CMD			0x04
#define PM_ICS			0x08
#define PM_WKC(x)		(0x10 + (x) * 4)
#define PM_WKS(x)		(0x18 + (x) * 4)
#define PM_SSC(x)		(0x20 + (x) * 4)
#define PM_SSS(x)		(0x30 + (x) * 4)

/* Bits in PM_STS */
#define PM_STS_BUSY		(1 << 8)

/* Bits in PM_CMD */
#define PM_CMD_CMD(x)		((x) << 0)
#define PM_CMD_IOC		(1 << 8)
#define PM_CMD_D3cold		(1 << 21)

/* List of commands */
#define CMD_SET_CFG		0x01

/* Bits in PM_ICS */
#define PM_ICS_INT_STATUS(x)	((x) & 0xff)
#define PM_ICS_IE		(1 << 8)
#define PM_ICS_IP		(1 << 9)
#define PM_ICS_SW_INT_STS	(1 << 10)

/* List of interrupts */
#define INT_INVALID		0
#define INT_CMD_COMPLETE	1
#define INT_CMD_ERR		2
#define INT_WAKE_EVENT		3
#define INT_LSS_POWER_ERR	4
#define INT_S0iX_MSG_ERR	5
#define INT_NO_C6		6
#define INT_TRIGGER_ERR		7
#define INT_INACTIVITY		8

/* South Complex devices */
#define LSS_MAX_SHARED_DEVS	4
#define LSS_MAX_DEVS		64

#define LSS_WS_BITS		1	/* wake state width */
#define LSS_PWS_BITS		2	/* power state width */

/* Supported device IDs */
#define PCI_DEVICE_ID_TANGIER	0x11a1

/* One cached PCI device sharing a Logical SubSystem (LSS) slot */
struct mid_pwr_dev {
	struct pci_dev *pdev;
	pci_power_t state;	/* last power state requested for this device */
};

struct mid_pwr {
	struct device *dev;
	void __iomem *regs;	/* PWRMU MMIO registers (BAR 0) */
	int irq;
	bool available;		/* set once probe fully succeeded */

	/* Serializes power state updates (SCU command is not reentrant) */
	struct mutex lock;
	/* Cache of devices per LSS; an LSS may be shared by several PCI devs */
	struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS];
};

/* Singleton set at probe time; consumed by intel_mid_pci_set_power_state() */
static struct mid_pwr *midpwr;

/* Read the power state status word @reg (each LSS takes LSS_PWS_BITS bits) */
static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg)
{
	return readl(pwr->regs + PM_SSS(reg));
}

/* Write the power state control word @reg; takes effect after CMD_SET_CFG */
static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value)
{
	writel(value, pwr->regs + PM_SSC(reg));
}

/* Program wake (ungate) control word @reg */
static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value)
{
	writel(value, pwr->regs + PM_WKC(reg));
}

/*
 * Mask PWRMU interrupts by clearing IE.
 * NOTE(review): this writes all bits except PM_ICS_IE; presumably the status
 * bits (IP, SW_INT_STS) are write-1-to-clear so this also acks anything
 * pending — confirm against PWRMU hardware documentation.
 */
static void mid_pwr_interrupt_disable(struct mid_pwr *pwr)
{
	writel(~PM_ICS_IE, pwr->regs + PM_ICS);
}

/* True while the PWRMU is still processing the previous command */
static bool mid_pwr_is_busy(struct mid_pwr *pwr)
{
	return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY);
}

/* Wait 500ms that the latest PWRMU command finished */
static int mid_pwr_wait(struct mid_pwr *pwr)
{
	/* 500000 iterations x udelay(1) ~= 500 ms busy-wait budget */
	unsigned int count = 500000;
	bool busy;

	do {
		busy = mid_pwr_is_busy(pwr);
		if (!busy)
			return 0;
		udelay(1);
	} while (--count);

	return -EBUSY;
}

/*
 * Issue @cmd to the SCU through PM_CMD and poll for completion.
 * Returns 0 on success, -EBUSY if the unit did not go idle in time.
 */
static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd)
{
	writel(PM_CMD_CMD(cmd), pwr->regs + PM_CMD);
	return mid_pwr_wait(pwr);
}

/*
 * Move the LSS at (@reg, @bit) to power state @new and verify the SCU
 * actually applied it. Returns 0 on success, -EBUSY on command timeout,
 * -EAGAIN if the SCU did not reach the requested state.
 * NOTE(review): callers on the runtime path hold pwr->lock; the probe-time
 * path (tng_set_initial_state) runs before the device is published — verify
 * no locking is needed there.
 */
static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new)
{
	int curstate;
	u32 power;
	int ret;

	/* Check if the device is already in desired state */
	power = mid_pwr_get_state(pwr, reg);
	curstate = (power >> bit) & 3;
	if (curstate == new)
		return 0;

	/* Update the power state */
	mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit));

	/* Send command to SCU */
	ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
	if (ret)
		return ret;

	/* Check if the device is already in desired state */
	power = mid_pwr_get_state(pwr, reg);
	curstate = (power >> bit) & 3;
	if (curstate != new)
		return -EAGAIN;

	return 0;
}

/*
 * Record @state for @pdev in the shared-LSS cache @lss and return the
 * shallowest (weakest) state any sharer has requested — the LSS as a whole
 * may only go as deep as its most demanding device allows.
 */
static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss,
					      struct pci_dev *pdev,
					      pci_power_t state)
{
	pci_power_t weakest = PCI_D3hot;
	unsigned int j;

	/* Find device in cache or first free cell */
	for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
		if (lss[j].pdev == pdev || !lss[j].pdev)
			break;
	}

	/* Store the desired state in cache */
	if (j < LSS_MAX_SHARED_DEVS) {
		lss[j].pdev = pdev;
		lss[j].state = state;
	} else {
		/* Cache overflow: honour this request directly, warn loudly */
		dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n");
		weakest = state;
	}

	/* Find the power state we may use */
	for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
		if (lss[j].state < weakest)
			weakest = lss[j].state;
	}

	return weakest;
}

/*
 * Resolve the effective state for LSS @id (shared devices considered) and
 * push it to the hardware. Caller holds pwr->lock.
 */
static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
			     pci_power_t state, int id, int reg, int bit)
{
	const char *name;
	int ret;

	state = __find_weakest_power_state(pwr->lss[id], pdev, state);
	name = pci_power_name(state);

	ret = __update_power_state(pwr, reg, bit, (__force int)state);
	if (ret) {
		dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret);
		return ret;
	}

	dev_vdbg(&pdev->dev, "Set power state %s\n", name);
	return 0;
}

/*
 * Look up @pdev's LSS id, clamp @state to the supported D0..D3hot range and
 * apply it under the lock.
 */
static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
				   pci_power_t state)
{
	int id, reg, bit;
	int ret;

	id = intel_mid_pwr_get_lss_id(pdev);
	if (id < 0)
		return id;

	/* LSS_PWS_BITS bits per LSS, packed into 32-bit registers */
	reg = (id * LSS_PWS_BITS) / 32;
	bit = (id * LSS_PWS_BITS) % 32;

	/* We support states between PCI_D0 and PCI_D3hot */
	if (state < PCI_D0)
		state = PCI_D0;
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	mutex_lock(&pwr->lock);
	ret = __set_power_state(pwr, pdev, state, id, reg, bit);
	mutex_unlock(&pwr->lock);
	return ret;
}

/*
 * Platform PM hook used by drivers/pci/pci-mid.c to set @pdev's power state.
 * No-op when the PWRMU has not been probed or is unavailable.
 * NOTE(review): the result of mid_pwr_set_power_state() is only logged;
 * 0 is always returned — presumably a deliberate best-effort contract with
 * the PCI core, confirm against the pci_platform_pm_ops callers.
 */
int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
{
	struct mid_pwr *pwr = midpwr;
	int ret = 0;

	might_sleep();

	if (pwr && pwr->available)
		ret = mid_pwr_set_power_state(pwr, pdev, state);
	dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);

/*
 * Extract the PWRMU Logical SubSystem id of @pdev.
 * Returns the id (0..LSS_MAX_DEVS-1), -EINVAL if there is no vendor
 * capability, -ENODEV if the capability does not carry an LSS type,
 * or -ERANGE for an out-of-range id.
 */
int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
{
	int vndr;
	u8 id;

	/*
	 * Mapping to PWRMU index is kept in the Logical SubSystem ID byte of
	 * Vendor capability.
	 */
	vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
	if (!vndr)
		return -EINVAL;

	/* Read the Logical SubSystem ID byte */
	pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
	if (!(id & INTEL_MID_PWR_LSS_TYPE))
		return -ENODEV;

	id &= ~INTEL_MID_PWR_LSS_TYPE;
	if (id >= LSS_MAX_DEVS)
		return -ERANGE;

	return id;
}

/*
 * IRQ handler: commands are polled (see mid_pwr_wait()), so any interrupt
 * that fires is unexpected — ack it and report its status code.
 */
static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id)
{
	struct mid_pwr *pwr = dev_id;
	u32 ics;

	ics = readl(pwr->regs + PM_ICS);
	if (!(ics & PM_ICS_IP))
		return IRQ_NONE;

	/* Ack the pending interrupt (IP is write-1-to-clear) */
	writel(ics | PM_ICS_IP, pwr->regs + PM_ICS);

	dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics));
	return IRQ_HANDLED;
}

/* Per-SoC hooks carried in the PCI id table's driver_data */
struct mid_pwr_device_info {
	int (*set_initial_state)(struct mid_pwr *pwr);
};

/*
 * Probe: map BAR 0, quiesce interrupts, apply the SoC's initial power
 * configuration, install the (unexpected-)IRQ handler and publish the
 * singleton for intel_mid_pci_set_power_state().
 * All resources are devm-managed; no remove callback is needed.
 */
static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mid_pwr_device_info *info = (void *)id->driver_data;
	struct device *dev = &pdev->dev;
	struct mid_pwr *pwr;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "error: could not enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
	if (!pwr)
		return -ENOMEM;

	pwr->dev = dev;
	pwr->regs = pcim_iomap_table(pdev)[0];
	pwr->irq = pdev->irq;

	mutex_init(&pwr->lock);

	/* Disable interrupts */
	mid_pwr_interrupt_disable(pwr);

	/* Initial state failure is not fatal: warn and continue */
	if (info && info->set_initial_state) {
		ret = info->set_initial_state(pwr);
		if (ret)
			dev_warn(dev, "Can't set initial state: %d\n", ret);
	}

	ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler,
			       IRQF_NO_SUSPEND, pci_name(pdev), pwr);
	if (ret)
		return ret;

	/* Publish only after everything above succeeded */
	pwr->available = true;
	midpwr = pwr;

	pci_set_drvdata(pdev, pwr);
	return 0;
}

/*
 * Tangier: ungate all wake sources and park every South Complex device in
 * PCI_D3hot so runtime PM starts from a known state.
 */
static int tng_set_initial_state(struct mid_pwr *pwr)
{
	unsigned int i, j;
	int ret;

	/*
	 * Enable wake events.
	 *
	 * PWRMU supports up to 32 sources for wake up the system. Ungate them
	 * all here.
	 */
	mid_pwr_set_wake(pwr, 0, 0xffffffff);
	mid_pwr_set_wake(pwr, 1, 0xffffffff);

	/*
	 * Power off South Complex devices.
	 *
	 * There is a map (see a note below) of 64 devices with 2 bits per each
	 * on 32-bit HW registers. The following calls set all devices to one
	 * known initial state, i.e. PCI_D3hot. This is done in conjunction
	 * with PMCSR setting in arch/x86/pci/intel_mid_pci.c.
	 *
	 * NOTE: The actual device mapping is provided by a platform at run
	 * time using vendor capability of PCI configuration space.
	 */
	mid_pwr_set_state(pwr, 0, 0xffffffff);
	mid_pwr_set_state(pwr, 1, 0xffffffff);
	mid_pwr_set_state(pwr, 2, 0xffffffff);
	mid_pwr_set_state(pwr, 3, 0xffffffff);

	/* Send command to SCU */
	ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
	if (ret)
		return ret;

	/* Keep the software cache in sync with the hardware state just set */
	for (i = 0; i < LSS_MAX_DEVS; i++) {
		for (j = 0; j < LSS_MAX_SHARED_DEVS; j++)
			pwr->lss[i][j].state = PCI_D3hot;
	}

	return 0;
}

static const struct mid_pwr_device_info tng_info = {
	.set_initial_state = tng_set_initial_state,
};

static const struct pci_device_id mid_pwr_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&tng_info },
	{}
};
MODULE_DEVICE_TABLE(pci, mid_pwr_pci_ids);

static struct pci_driver mid_pwr_pci_driver = {
	.name		= "intel_mid_pwr",
	.probe		= mid_pwr_probe,
	.id_table	= mid_pwr_pci_ids,
};

/* Built-in only (no module unload path), hence no remove callback above */
builtin_pci_driver(mid_pwr_pci_driver);