// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

/* Slot 31 is reserved for direct commands (DCMD); slots 0-30 carry data tasks. */
#define DCMD_SLOT 31
#define NUM_SLOTS 32

/* Per-slot bookkeeping for one in-flight mmc_request. */
struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

/* CPU address of the task descriptor for @tag. */
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

/* The link descriptor sits immediately after the task descriptor in a slot. */
static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

/* DMA (bus) address of the transfer-descriptor list belonging to @tag. */
static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

/* CPU address of the transfer-descriptor list belonging to @tag. */
static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

/*
 * Initialise the link descriptor for @tag so that it chains to the slot's
 * transfer-descriptor list.  The DCMD slot (when DCMD is supported) instead
 * gets an invalid end-of-chain descriptor, since direct commands move no data.
 */
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	/* NOTE(review): byte 8 is already cleared by the memset above */
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	/* ACT 0x6 marks this as a link descriptor; address follows at offset 4 */
	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

/* Program the same mask into both the status-enable and signal-enable regs. */
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

/* Dump all CQHCI registers for debugging, then any host-specific extras. */
static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|     .
 *      .           .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	/* transfer descriptors: one list of max_segs entries per queue slot */
	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		/* release the first allocation so enable can be retried cleanly */
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

/* Program the controller and turn the command queue engine on. */
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	/* Point the controller at the task descriptor list (64-bit split) */
	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	/* Quiesce interrupts while enabling */
	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

/* Clear the enable bit and mark the engine off. */
static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

/* Deactivate the engine (e.g. for suspend); re-enabled on the next request. */
int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
311*06533002SEric Biggers EXPORT_SYMBOL(cqhci_deactivate); 312*06533002SEric Biggers 313*06533002SEric Biggers int cqhci_resume(struct mmc_host *mmc) 314*06533002SEric Biggers { 315*06533002SEric Biggers /* Re-enable is done upon first request */ 316*06533002SEric Biggers return 0; 317*06533002SEric Biggers } 318*06533002SEric Biggers EXPORT_SYMBOL(cqhci_resume); 319*06533002SEric Biggers 320*06533002SEric Biggers static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card) 321*06533002SEric Biggers { 322*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 323*06533002SEric Biggers int err; 324*06533002SEric Biggers 325*06533002SEric Biggers if (!card->ext_csd.cmdq_en) 326*06533002SEric Biggers return -EINVAL; 327*06533002SEric Biggers 328*06533002SEric Biggers if (cq_host->enabled) 329*06533002SEric Biggers return 0; 330*06533002SEric Biggers 331*06533002SEric Biggers cq_host->rca = card->rca; 332*06533002SEric Biggers 333*06533002SEric Biggers err = cqhci_host_alloc_tdl(cq_host); 334*06533002SEric Biggers if (err) { 335*06533002SEric Biggers pr_err("%s: Failed to enable CQE, error %d\n", 336*06533002SEric Biggers mmc_hostname(mmc), err); 337*06533002SEric Biggers return err; 338*06533002SEric Biggers } 339*06533002SEric Biggers 340*06533002SEric Biggers __cqhci_enable(cq_host); 341*06533002SEric Biggers 342*06533002SEric Biggers cq_host->enabled = true; 343*06533002SEric Biggers 344*06533002SEric Biggers #ifdef DEBUG 345*06533002SEric Biggers cqhci_dumpregs(cq_host); 346*06533002SEric Biggers #endif 347*06533002SEric Biggers return 0; 348*06533002SEric Biggers } 349*06533002SEric Biggers 350*06533002SEric Biggers /* CQHCI is idle and should halt immediately, so set a small timeout */ 351*06533002SEric Biggers #define CQHCI_OFF_TIMEOUT 100 352*06533002SEric Biggers 353*06533002SEric Biggers static u32 cqhci_read_ctl(struct cqhci_host *cq_host) 354*06533002SEric Biggers { 355*06533002SEric Biggers return cqhci_readl(cq_host, CQHCI_CTL); 
356*06533002SEric Biggers } 357*06533002SEric Biggers 358*06533002SEric Biggers static void cqhci_off(struct mmc_host *mmc) 359*06533002SEric Biggers { 360*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 361*06533002SEric Biggers u32 reg; 362*06533002SEric Biggers int err; 363*06533002SEric Biggers 364*06533002SEric Biggers if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) 365*06533002SEric Biggers return; 366*06533002SEric Biggers 367*06533002SEric Biggers if (cq_host->ops->disable) 368*06533002SEric Biggers cq_host->ops->disable(mmc, false); 369*06533002SEric Biggers 370*06533002SEric Biggers cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); 371*06533002SEric Biggers 372*06533002SEric Biggers err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg, 373*06533002SEric Biggers reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT); 374*06533002SEric Biggers if (err < 0) 375*06533002SEric Biggers pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); 376*06533002SEric Biggers else 377*06533002SEric Biggers pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc)); 378*06533002SEric Biggers 379*06533002SEric Biggers if (cq_host->ops->post_disable) 380*06533002SEric Biggers cq_host->ops->post_disable(mmc); 381*06533002SEric Biggers 382*06533002SEric Biggers mmc->cqe_on = false; 383*06533002SEric Biggers } 384*06533002SEric Biggers 385*06533002SEric Biggers static void cqhci_disable(struct mmc_host *mmc) 386*06533002SEric Biggers { 387*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 388*06533002SEric Biggers 389*06533002SEric Biggers if (!cq_host->enabled) 390*06533002SEric Biggers return; 391*06533002SEric Biggers 392*06533002SEric Biggers cqhci_off(mmc); 393*06533002SEric Biggers 394*06533002SEric Biggers __cqhci_disable(cq_host); 395*06533002SEric Biggers 396*06533002SEric Biggers dmam_free_coherent(mmc_dev(mmc), cq_host->data_size, 397*06533002SEric Biggers cq_host->trans_desc_base, 398*06533002SEric Biggers 
cq_host->trans_desc_dma_base); 399*06533002SEric Biggers 400*06533002SEric Biggers dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size, 401*06533002SEric Biggers cq_host->desc_base, 402*06533002SEric Biggers cq_host->desc_dma_base); 403*06533002SEric Biggers 404*06533002SEric Biggers cq_host->trans_desc_base = NULL; 405*06533002SEric Biggers cq_host->desc_base = NULL; 406*06533002SEric Biggers 407*06533002SEric Biggers cq_host->enabled = false; 408*06533002SEric Biggers } 409*06533002SEric Biggers 410*06533002SEric Biggers static void cqhci_prep_task_desc(struct mmc_request *mrq, 411*06533002SEric Biggers u64 *data, bool intr) 412*06533002SEric Biggers { 413*06533002SEric Biggers u32 req_flags = mrq->data->flags; 414*06533002SEric Biggers 415*06533002SEric Biggers *data = CQHCI_VALID(1) | 416*06533002SEric Biggers CQHCI_END(1) | 417*06533002SEric Biggers CQHCI_INT(intr) | 418*06533002SEric Biggers CQHCI_ACT(0x5) | 419*06533002SEric Biggers CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) | 420*06533002SEric Biggers CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) | 421*06533002SEric Biggers CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) | 422*06533002SEric Biggers CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) | 423*06533002SEric Biggers CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) | 424*06533002SEric Biggers CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) | 425*06533002SEric Biggers CQHCI_BLK_COUNT(mrq->data->blocks) | 426*06533002SEric Biggers CQHCI_BLK_ADDR((u64)mrq->data->blk_addr); 427*06533002SEric Biggers 428*06533002SEric Biggers pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n", 429*06533002SEric Biggers mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data); 430*06533002SEric Biggers } 431*06533002SEric Biggers 432*06533002SEric Biggers static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq) 433*06533002SEric Biggers { 434*06533002SEric Biggers int sg_count; 435*06533002SEric Biggers struct mmc_data *data = mrq->data; 
436*06533002SEric Biggers 437*06533002SEric Biggers if (!data) 438*06533002SEric Biggers return -EINVAL; 439*06533002SEric Biggers 440*06533002SEric Biggers sg_count = dma_map_sg(mmc_dev(host), data->sg, 441*06533002SEric Biggers data->sg_len, 442*06533002SEric Biggers (data->flags & MMC_DATA_WRITE) ? 443*06533002SEric Biggers DMA_TO_DEVICE : DMA_FROM_DEVICE); 444*06533002SEric Biggers if (!sg_count) { 445*06533002SEric Biggers pr_err("%s: sg-len: %d\n", __func__, data->sg_len); 446*06533002SEric Biggers return -ENOMEM; 447*06533002SEric Biggers } 448*06533002SEric Biggers 449*06533002SEric Biggers return sg_count; 450*06533002SEric Biggers } 451*06533002SEric Biggers 452*06533002SEric Biggers static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end, 453*06533002SEric Biggers bool dma64) 454*06533002SEric Biggers { 455*06533002SEric Biggers __le32 *attr = (__le32 __force *)desc; 456*06533002SEric Biggers 457*06533002SEric Biggers *attr = (CQHCI_VALID(1) | 458*06533002SEric Biggers CQHCI_END(end ? 
1 : 0) | 459*06533002SEric Biggers CQHCI_INT(0) | 460*06533002SEric Biggers CQHCI_ACT(0x4) | 461*06533002SEric Biggers CQHCI_DAT_LENGTH(len)); 462*06533002SEric Biggers 463*06533002SEric Biggers if (dma64) { 464*06533002SEric Biggers __le64 *dataddr = (__le64 __force *)(desc + 4); 465*06533002SEric Biggers 466*06533002SEric Biggers dataddr[0] = cpu_to_le64(addr); 467*06533002SEric Biggers } else { 468*06533002SEric Biggers __le32 *dataddr = (__le32 __force *)(desc + 4); 469*06533002SEric Biggers 470*06533002SEric Biggers dataddr[0] = cpu_to_le32(addr); 471*06533002SEric Biggers } 472*06533002SEric Biggers } 473*06533002SEric Biggers 474*06533002SEric Biggers static int cqhci_prep_tran_desc(struct mmc_request *mrq, 475*06533002SEric Biggers struct cqhci_host *cq_host, int tag) 476*06533002SEric Biggers { 477*06533002SEric Biggers struct mmc_data *data = mrq->data; 478*06533002SEric Biggers int i, sg_count, len; 479*06533002SEric Biggers bool end = false; 480*06533002SEric Biggers bool dma64 = cq_host->dma64; 481*06533002SEric Biggers dma_addr_t addr; 482*06533002SEric Biggers u8 *desc; 483*06533002SEric Biggers struct scatterlist *sg; 484*06533002SEric Biggers 485*06533002SEric Biggers sg_count = cqhci_dma_map(mrq->host, mrq); 486*06533002SEric Biggers if (sg_count < 0) { 487*06533002SEric Biggers pr_err("%s: %s: unable to map sg lists, %d\n", 488*06533002SEric Biggers mmc_hostname(mrq->host), __func__, sg_count); 489*06533002SEric Biggers return sg_count; 490*06533002SEric Biggers } 491*06533002SEric Biggers 492*06533002SEric Biggers desc = get_trans_desc(cq_host, tag); 493*06533002SEric Biggers 494*06533002SEric Biggers for_each_sg(data->sg, sg, sg_count, i) { 495*06533002SEric Biggers addr = sg_dma_address(sg); 496*06533002SEric Biggers len = sg_dma_len(sg); 497*06533002SEric Biggers 498*06533002SEric Biggers if ((i+1) == sg_count) 499*06533002SEric Biggers end = true; 500*06533002SEric Biggers cqhci_set_tran_desc(desc, addr, len, end, dma64); 501*06533002SEric 
Biggers desc += cq_host->trans_desc_len; 502*06533002SEric Biggers } 503*06533002SEric Biggers 504*06533002SEric Biggers return 0; 505*06533002SEric Biggers } 506*06533002SEric Biggers 507*06533002SEric Biggers static void cqhci_prep_dcmd_desc(struct mmc_host *mmc, 508*06533002SEric Biggers struct mmc_request *mrq) 509*06533002SEric Biggers { 510*06533002SEric Biggers u64 *task_desc = NULL; 511*06533002SEric Biggers u64 data = 0; 512*06533002SEric Biggers u8 resp_type; 513*06533002SEric Biggers u8 *desc; 514*06533002SEric Biggers __le64 *dataddr; 515*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 516*06533002SEric Biggers u8 timing; 517*06533002SEric Biggers 518*06533002SEric Biggers if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) { 519*06533002SEric Biggers resp_type = 0x0; 520*06533002SEric Biggers timing = 0x1; 521*06533002SEric Biggers } else { 522*06533002SEric Biggers if (mrq->cmd->flags & MMC_RSP_R1B) { 523*06533002SEric Biggers resp_type = 0x3; 524*06533002SEric Biggers timing = 0x0; 525*06533002SEric Biggers } else { 526*06533002SEric Biggers resp_type = 0x2; 527*06533002SEric Biggers timing = 0x1; 528*06533002SEric Biggers } 529*06533002SEric Biggers } 530*06533002SEric Biggers 531*06533002SEric Biggers task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot); 532*06533002SEric Biggers memset(task_desc, 0, cq_host->task_desc_len); 533*06533002SEric Biggers data |= (CQHCI_VALID(1) | 534*06533002SEric Biggers CQHCI_END(1) | 535*06533002SEric Biggers CQHCI_INT(1) | 536*06533002SEric Biggers CQHCI_QBAR(1) | 537*06533002SEric Biggers CQHCI_ACT(0x5) | 538*06533002SEric Biggers CQHCI_CMD_INDEX(mrq->cmd->opcode) | 539*06533002SEric Biggers CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type)); 540*06533002SEric Biggers if (cq_host->ops->update_dcmd_desc) 541*06533002SEric Biggers cq_host->ops->update_dcmd_desc(mmc, mrq, &data); 542*06533002SEric Biggers *task_desc |= data; 543*06533002SEric Biggers desc = (u8 *)task_desc; 
544*06533002SEric Biggers pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n", 545*06533002SEric Biggers mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type); 546*06533002SEric Biggers dataddr = (__le64 __force *)(desc + 4); 547*06533002SEric Biggers dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg); 548*06533002SEric Biggers 549*06533002SEric Biggers } 550*06533002SEric Biggers 551*06533002SEric Biggers static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq) 552*06533002SEric Biggers { 553*06533002SEric Biggers struct mmc_data *data = mrq->data; 554*06533002SEric Biggers 555*06533002SEric Biggers if (data) { 556*06533002SEric Biggers dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len, 557*06533002SEric Biggers (data->flags & MMC_DATA_READ) ? 558*06533002SEric Biggers DMA_FROM_DEVICE : DMA_TO_DEVICE); 559*06533002SEric Biggers } 560*06533002SEric Biggers } 561*06533002SEric Biggers 562*06533002SEric Biggers static inline int cqhci_tag(struct mmc_request *mrq) 563*06533002SEric Biggers { 564*06533002SEric Biggers return mrq->cmd ? 
DCMD_SLOT : mrq->tag; 565*06533002SEric Biggers } 566*06533002SEric Biggers 567*06533002SEric Biggers static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 568*06533002SEric Biggers { 569*06533002SEric Biggers int err = 0; 570*06533002SEric Biggers u64 data = 0; 571*06533002SEric Biggers u64 *task_desc = NULL; 572*06533002SEric Biggers int tag = cqhci_tag(mrq); 573*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 574*06533002SEric Biggers unsigned long flags; 575*06533002SEric Biggers 576*06533002SEric Biggers if (!cq_host->enabled) { 577*06533002SEric Biggers pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc)); 578*06533002SEric Biggers return -EINVAL; 579*06533002SEric Biggers } 580*06533002SEric Biggers 581*06533002SEric Biggers /* First request after resume has to re-enable */ 582*06533002SEric Biggers if (!cq_host->activated) 583*06533002SEric Biggers __cqhci_enable(cq_host); 584*06533002SEric Biggers 585*06533002SEric Biggers if (!mmc->cqe_on) { 586*06533002SEric Biggers if (cq_host->ops->pre_enable) 587*06533002SEric Biggers cq_host->ops->pre_enable(mmc); 588*06533002SEric Biggers 589*06533002SEric Biggers cqhci_writel(cq_host, 0, CQHCI_CTL); 590*06533002SEric Biggers mmc->cqe_on = true; 591*06533002SEric Biggers pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc)); 592*06533002SEric Biggers if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) { 593*06533002SEric Biggers pr_err("%s: cqhci: CQE failed to exit halt state\n", 594*06533002SEric Biggers mmc_hostname(mmc)); 595*06533002SEric Biggers } 596*06533002SEric Biggers if (cq_host->ops->enable) 597*06533002SEric Biggers cq_host->ops->enable(mmc); 598*06533002SEric Biggers } 599*06533002SEric Biggers 600*06533002SEric Biggers if (mrq->data) { 601*06533002SEric Biggers task_desc = (__le64 __force *)get_desc(cq_host, tag); 602*06533002SEric Biggers cqhci_prep_task_desc(mrq, &data, 1); 603*06533002SEric Biggers *task_desc = cpu_to_le64(data); 604*06533002SEric Biggers err = 
cqhci_prep_tran_desc(mrq, cq_host, tag); 605*06533002SEric Biggers if (err) { 606*06533002SEric Biggers pr_err("%s: cqhci: failed to setup tx desc: %d\n", 607*06533002SEric Biggers mmc_hostname(mmc), err); 608*06533002SEric Biggers return err; 609*06533002SEric Biggers } 610*06533002SEric Biggers } else { 611*06533002SEric Biggers cqhci_prep_dcmd_desc(mmc, mrq); 612*06533002SEric Biggers } 613*06533002SEric Biggers 614*06533002SEric Biggers spin_lock_irqsave(&cq_host->lock, flags); 615*06533002SEric Biggers 616*06533002SEric Biggers if (cq_host->recovery_halt) { 617*06533002SEric Biggers err = -EBUSY; 618*06533002SEric Biggers goto out_unlock; 619*06533002SEric Biggers } 620*06533002SEric Biggers 621*06533002SEric Biggers cq_host->slot[tag].mrq = mrq; 622*06533002SEric Biggers cq_host->slot[tag].flags = 0; 623*06533002SEric Biggers 624*06533002SEric Biggers cq_host->qcnt += 1; 625*06533002SEric Biggers /* Make sure descriptors are ready before ringing the doorbell */ 626*06533002SEric Biggers wmb(); 627*06533002SEric Biggers cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR); 628*06533002SEric Biggers if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag))) 629*06533002SEric Biggers pr_debug("%s: cqhci: doorbell not set for tag %d\n", 630*06533002SEric Biggers mmc_hostname(mmc), tag); 631*06533002SEric Biggers out_unlock: 632*06533002SEric Biggers spin_unlock_irqrestore(&cq_host->lock, flags); 633*06533002SEric Biggers 634*06533002SEric Biggers if (err) 635*06533002SEric Biggers cqhci_post_req(mmc, mrq); 636*06533002SEric Biggers 637*06533002SEric Biggers return err; 638*06533002SEric Biggers } 639*06533002SEric Biggers 640*06533002SEric Biggers static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq, 641*06533002SEric Biggers bool notify) 642*06533002SEric Biggers { 643*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 644*06533002SEric Biggers 645*06533002SEric Biggers if (!cq_host->recovery_halt) { 646*06533002SEric Biggers 
cq_host->recovery_halt = true; 647*06533002SEric Biggers pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc)); 648*06533002SEric Biggers wake_up(&cq_host->wait_queue); 649*06533002SEric Biggers if (notify && mrq->recovery_notifier) 650*06533002SEric Biggers mrq->recovery_notifier(mrq); 651*06533002SEric Biggers } 652*06533002SEric Biggers } 653*06533002SEric Biggers 654*06533002SEric Biggers static unsigned int cqhci_error_flags(int error1, int error2) 655*06533002SEric Biggers { 656*06533002SEric Biggers int error = error1 ? error1 : error2; 657*06533002SEric Biggers 658*06533002SEric Biggers switch (error) { 659*06533002SEric Biggers case -EILSEQ: 660*06533002SEric Biggers return CQHCI_HOST_CRC; 661*06533002SEric Biggers case -ETIMEDOUT: 662*06533002SEric Biggers return CQHCI_HOST_TIMEOUT; 663*06533002SEric Biggers default: 664*06533002SEric Biggers return CQHCI_HOST_OTHER; 665*06533002SEric Biggers } 666*06533002SEric Biggers } 667*06533002SEric Biggers 668*06533002SEric Biggers static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error, 669*06533002SEric Biggers int data_error) 670*06533002SEric Biggers { 671*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 672*06533002SEric Biggers struct cqhci_slot *slot; 673*06533002SEric Biggers u32 terri; 674*06533002SEric Biggers int tag; 675*06533002SEric Biggers 676*06533002SEric Biggers spin_lock(&cq_host->lock); 677*06533002SEric Biggers 678*06533002SEric Biggers terri = cqhci_readl(cq_host, CQHCI_TERRI); 679*06533002SEric Biggers 680*06533002SEric Biggers pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", 681*06533002SEric Biggers mmc_hostname(mmc), status, cmd_error, data_error, terri); 682*06533002SEric Biggers 683*06533002SEric Biggers /* Forget about errors when recovery has already been triggered */ 684*06533002SEric Biggers if (cq_host->recovery_halt) 685*06533002SEric Biggers goto out_unlock; 686*06533002SEric Biggers 
687*06533002SEric Biggers if (!cq_host->qcnt) { 688*06533002SEric Biggers WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", 689*06533002SEric Biggers mmc_hostname(mmc), status, cmd_error, data_error, 690*06533002SEric Biggers terri); 691*06533002SEric Biggers goto out_unlock; 692*06533002SEric Biggers } 693*06533002SEric Biggers 694*06533002SEric Biggers if (CQHCI_TERRI_C_VALID(terri)) { 695*06533002SEric Biggers tag = CQHCI_TERRI_C_TASK(terri); 696*06533002SEric Biggers slot = &cq_host->slot[tag]; 697*06533002SEric Biggers if (slot->mrq) { 698*06533002SEric Biggers slot->flags = cqhci_error_flags(cmd_error, data_error); 699*06533002SEric Biggers cqhci_recovery_needed(mmc, slot->mrq, true); 700*06533002SEric Biggers } 701*06533002SEric Biggers } 702*06533002SEric Biggers 703*06533002SEric Biggers if (CQHCI_TERRI_D_VALID(terri)) { 704*06533002SEric Biggers tag = CQHCI_TERRI_D_TASK(terri); 705*06533002SEric Biggers slot = &cq_host->slot[tag]; 706*06533002SEric Biggers if (slot->mrq) { 707*06533002SEric Biggers slot->flags = cqhci_error_flags(data_error, cmd_error); 708*06533002SEric Biggers cqhci_recovery_needed(mmc, slot->mrq, true); 709*06533002SEric Biggers } 710*06533002SEric Biggers } 711*06533002SEric Biggers 712*06533002SEric Biggers if (!cq_host->recovery_halt) { 713*06533002SEric Biggers /* 714*06533002SEric Biggers * The only way to guarantee forward progress is to mark at 715*06533002SEric Biggers * least one task in error, so if none is indicated, pick one. 
716*06533002SEric Biggers */ 717*06533002SEric Biggers for (tag = 0; tag < NUM_SLOTS; tag++) { 718*06533002SEric Biggers slot = &cq_host->slot[tag]; 719*06533002SEric Biggers if (!slot->mrq) 720*06533002SEric Biggers continue; 721*06533002SEric Biggers slot->flags = cqhci_error_flags(data_error, cmd_error); 722*06533002SEric Biggers cqhci_recovery_needed(mmc, slot->mrq, true); 723*06533002SEric Biggers break; 724*06533002SEric Biggers } 725*06533002SEric Biggers } 726*06533002SEric Biggers 727*06533002SEric Biggers out_unlock: 728*06533002SEric Biggers spin_unlock(&cq_host->lock); 729*06533002SEric Biggers } 730*06533002SEric Biggers 731*06533002SEric Biggers static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag) 732*06533002SEric Biggers { 733*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 734*06533002SEric Biggers struct cqhci_slot *slot = &cq_host->slot[tag]; 735*06533002SEric Biggers struct mmc_request *mrq = slot->mrq; 736*06533002SEric Biggers struct mmc_data *data; 737*06533002SEric Biggers 738*06533002SEric Biggers if (!mrq) { 739*06533002SEric Biggers WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n", 740*06533002SEric Biggers mmc_hostname(mmc), tag); 741*06533002SEric Biggers return; 742*06533002SEric Biggers } 743*06533002SEric Biggers 744*06533002SEric Biggers /* No completions allowed during recovery */ 745*06533002SEric Biggers if (cq_host->recovery_halt) { 746*06533002SEric Biggers slot->flags |= CQHCI_COMPLETED; 747*06533002SEric Biggers return; 748*06533002SEric Biggers } 749*06533002SEric Biggers 750*06533002SEric Biggers slot->mrq = NULL; 751*06533002SEric Biggers 752*06533002SEric Biggers cq_host->qcnt -= 1; 753*06533002SEric Biggers 754*06533002SEric Biggers data = mrq->data; 755*06533002SEric Biggers if (data) { 756*06533002SEric Biggers if (data->error) 757*06533002SEric Biggers data->bytes_xfered = 0; 758*06533002SEric Biggers else 759*06533002SEric Biggers data->bytes_xfered = data->blksz * 
data->blocks; 760*06533002SEric Biggers } 761*06533002SEric Biggers 762*06533002SEric Biggers mmc_cqe_request_done(mmc, mrq); 763*06533002SEric Biggers } 764*06533002SEric Biggers 765*06533002SEric Biggers irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, 766*06533002SEric Biggers int data_error) 767*06533002SEric Biggers { 768*06533002SEric Biggers u32 status; 769*06533002SEric Biggers unsigned long tag = 0, comp_status; 770*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 771*06533002SEric Biggers 772*06533002SEric Biggers status = cqhci_readl(cq_host, CQHCI_IS); 773*06533002SEric Biggers cqhci_writel(cq_host, status, CQHCI_IS); 774*06533002SEric Biggers 775*06533002SEric Biggers pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status); 776*06533002SEric Biggers 777*06533002SEric Biggers if ((status & CQHCI_IS_RED) || cmd_error || data_error) 778*06533002SEric Biggers cqhci_error_irq(mmc, status, cmd_error, data_error); 779*06533002SEric Biggers 780*06533002SEric Biggers if (status & CQHCI_IS_TCC) { 781*06533002SEric Biggers /* read TCN and complete the request */ 782*06533002SEric Biggers comp_status = cqhci_readl(cq_host, CQHCI_TCN); 783*06533002SEric Biggers cqhci_writel(cq_host, comp_status, CQHCI_TCN); 784*06533002SEric Biggers pr_debug("%s: cqhci: TCN: 0x%08lx\n", 785*06533002SEric Biggers mmc_hostname(mmc), comp_status); 786*06533002SEric Biggers 787*06533002SEric Biggers spin_lock(&cq_host->lock); 788*06533002SEric Biggers 789*06533002SEric Biggers for_each_set_bit(tag, &comp_status, cq_host->num_slots) { 790*06533002SEric Biggers /* complete the corresponding mrq */ 791*06533002SEric Biggers pr_debug("%s: cqhci: completing tag %lu\n", 792*06533002SEric Biggers mmc_hostname(mmc), tag); 793*06533002SEric Biggers cqhci_finish_mrq(mmc, tag); 794*06533002SEric Biggers } 795*06533002SEric Biggers 796*06533002SEric Biggers if (cq_host->waiting_for_idle && !cq_host->qcnt) { 797*06533002SEric Biggers 
cq_host->waiting_for_idle = false; 798*06533002SEric Biggers wake_up(&cq_host->wait_queue); 799*06533002SEric Biggers } 800*06533002SEric Biggers 801*06533002SEric Biggers spin_unlock(&cq_host->lock); 802*06533002SEric Biggers } 803*06533002SEric Biggers 804*06533002SEric Biggers if (status & CQHCI_IS_TCL) 805*06533002SEric Biggers wake_up(&cq_host->wait_queue); 806*06533002SEric Biggers 807*06533002SEric Biggers if (status & CQHCI_IS_HAC) 808*06533002SEric Biggers wake_up(&cq_host->wait_queue); 809*06533002SEric Biggers 810*06533002SEric Biggers return IRQ_HANDLED; 811*06533002SEric Biggers } 812*06533002SEric Biggers EXPORT_SYMBOL(cqhci_irq); 813*06533002SEric Biggers 814*06533002SEric Biggers static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret) 815*06533002SEric Biggers { 816*06533002SEric Biggers unsigned long flags; 817*06533002SEric Biggers bool is_idle; 818*06533002SEric Biggers 819*06533002SEric Biggers spin_lock_irqsave(&cq_host->lock, flags); 820*06533002SEric Biggers is_idle = !cq_host->qcnt || cq_host->recovery_halt; 821*06533002SEric Biggers *ret = cq_host->recovery_halt ? 
-EBUSY : 0; 822*06533002SEric Biggers cq_host->waiting_for_idle = !is_idle; 823*06533002SEric Biggers spin_unlock_irqrestore(&cq_host->lock, flags); 824*06533002SEric Biggers 825*06533002SEric Biggers return is_idle; 826*06533002SEric Biggers } 827*06533002SEric Biggers 828*06533002SEric Biggers static int cqhci_wait_for_idle(struct mmc_host *mmc) 829*06533002SEric Biggers { 830*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 831*06533002SEric Biggers int ret; 832*06533002SEric Biggers 833*06533002SEric Biggers wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret)); 834*06533002SEric Biggers 835*06533002SEric Biggers return ret; 836*06533002SEric Biggers } 837*06533002SEric Biggers 838*06533002SEric Biggers static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq, 839*06533002SEric Biggers bool *recovery_needed) 840*06533002SEric Biggers { 841*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 842*06533002SEric Biggers int tag = cqhci_tag(mrq); 843*06533002SEric Biggers struct cqhci_slot *slot = &cq_host->slot[tag]; 844*06533002SEric Biggers unsigned long flags; 845*06533002SEric Biggers bool timed_out; 846*06533002SEric Biggers 847*06533002SEric Biggers spin_lock_irqsave(&cq_host->lock, flags); 848*06533002SEric Biggers timed_out = slot->mrq == mrq; 849*06533002SEric Biggers if (timed_out) { 850*06533002SEric Biggers slot->flags |= CQHCI_EXTERNAL_TIMEOUT; 851*06533002SEric Biggers cqhci_recovery_needed(mmc, mrq, false); 852*06533002SEric Biggers *recovery_needed = cq_host->recovery_halt; 853*06533002SEric Biggers } 854*06533002SEric Biggers spin_unlock_irqrestore(&cq_host->lock, flags); 855*06533002SEric Biggers 856*06533002SEric Biggers if (timed_out) { 857*06533002SEric Biggers pr_err("%s: cqhci: timeout for tag %d\n", 858*06533002SEric Biggers mmc_hostname(mmc), tag); 859*06533002SEric Biggers cqhci_dumpregs(cq_host); 860*06533002SEric Biggers } 861*06533002SEric Biggers 862*06533002SEric Biggers 
return timed_out; 863*06533002SEric Biggers } 864*06533002SEric Biggers 865*06533002SEric Biggers static bool cqhci_tasks_cleared(struct cqhci_host *cq_host) 866*06533002SEric Biggers { 867*06533002SEric Biggers return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS); 868*06533002SEric Biggers } 869*06533002SEric Biggers 870*06533002SEric Biggers static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout) 871*06533002SEric Biggers { 872*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 873*06533002SEric Biggers bool ret; 874*06533002SEric Biggers u32 ctl; 875*06533002SEric Biggers 876*06533002SEric Biggers cqhci_set_irqs(cq_host, CQHCI_IS_TCL); 877*06533002SEric Biggers 878*06533002SEric Biggers ctl = cqhci_readl(cq_host, CQHCI_CTL); 879*06533002SEric Biggers ctl |= CQHCI_CLEAR_ALL_TASKS; 880*06533002SEric Biggers cqhci_writel(cq_host, ctl, CQHCI_CTL); 881*06533002SEric Biggers 882*06533002SEric Biggers wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host), 883*06533002SEric Biggers msecs_to_jiffies(timeout) + 1); 884*06533002SEric Biggers 885*06533002SEric Biggers cqhci_set_irqs(cq_host, 0); 886*06533002SEric Biggers 887*06533002SEric Biggers ret = cqhci_tasks_cleared(cq_host); 888*06533002SEric Biggers 889*06533002SEric Biggers if (!ret) 890*06533002SEric Biggers pr_debug("%s: cqhci: Failed to clear tasks\n", 891*06533002SEric Biggers mmc_hostname(mmc)); 892*06533002SEric Biggers 893*06533002SEric Biggers return ret; 894*06533002SEric Biggers } 895*06533002SEric Biggers 896*06533002SEric Biggers static bool cqhci_halted(struct cqhci_host *cq_host) 897*06533002SEric Biggers { 898*06533002SEric Biggers return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT; 899*06533002SEric Biggers } 900*06533002SEric Biggers 901*06533002SEric Biggers static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout) 902*06533002SEric Biggers { 903*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 
904*06533002SEric Biggers bool ret; 905*06533002SEric Biggers u32 ctl; 906*06533002SEric Biggers 907*06533002SEric Biggers if (cqhci_halted(cq_host)) 908*06533002SEric Biggers return true; 909*06533002SEric Biggers 910*06533002SEric Biggers cqhci_set_irqs(cq_host, CQHCI_IS_HAC); 911*06533002SEric Biggers 912*06533002SEric Biggers ctl = cqhci_readl(cq_host, CQHCI_CTL); 913*06533002SEric Biggers ctl |= CQHCI_HALT; 914*06533002SEric Biggers cqhci_writel(cq_host, ctl, CQHCI_CTL); 915*06533002SEric Biggers 916*06533002SEric Biggers wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host), 917*06533002SEric Biggers msecs_to_jiffies(timeout) + 1); 918*06533002SEric Biggers 919*06533002SEric Biggers cqhci_set_irqs(cq_host, 0); 920*06533002SEric Biggers 921*06533002SEric Biggers ret = cqhci_halted(cq_host); 922*06533002SEric Biggers 923*06533002SEric Biggers if (!ret) 924*06533002SEric Biggers pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); 925*06533002SEric Biggers 926*06533002SEric Biggers return ret; 927*06533002SEric Biggers } 928*06533002SEric Biggers 929*06533002SEric Biggers /* 930*06533002SEric Biggers * After halting we expect to be able to use the command line. We interpret the 931*06533002SEric Biggers * failure to halt to mean the data lines might still be in use (and the upper 932*06533002SEric Biggers * layers will need to send a STOP command), so we set the timeout based on a 933*06533002SEric Biggers * generous command timeout. 
934*06533002SEric Biggers */ 935*06533002SEric Biggers #define CQHCI_START_HALT_TIMEOUT 5 936*06533002SEric Biggers 937*06533002SEric Biggers static void cqhci_recovery_start(struct mmc_host *mmc) 938*06533002SEric Biggers { 939*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 940*06533002SEric Biggers 941*06533002SEric Biggers pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); 942*06533002SEric Biggers 943*06533002SEric Biggers WARN_ON(!cq_host->recovery_halt); 944*06533002SEric Biggers 945*06533002SEric Biggers cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT); 946*06533002SEric Biggers 947*06533002SEric Biggers if (cq_host->ops->disable) 948*06533002SEric Biggers cq_host->ops->disable(mmc, true); 949*06533002SEric Biggers 950*06533002SEric Biggers mmc->cqe_on = false; 951*06533002SEric Biggers } 952*06533002SEric Biggers 953*06533002SEric Biggers static int cqhci_error_from_flags(unsigned int flags) 954*06533002SEric Biggers { 955*06533002SEric Biggers if (!flags) 956*06533002SEric Biggers return 0; 957*06533002SEric Biggers 958*06533002SEric Biggers /* CRC errors might indicate re-tuning so prefer to report that */ 959*06533002SEric Biggers if (flags & CQHCI_HOST_CRC) 960*06533002SEric Biggers return -EILSEQ; 961*06533002SEric Biggers 962*06533002SEric Biggers if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT)) 963*06533002SEric Biggers return -ETIMEDOUT; 964*06533002SEric Biggers 965*06533002SEric Biggers return -EIO; 966*06533002SEric Biggers } 967*06533002SEric Biggers 968*06533002SEric Biggers static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag) 969*06533002SEric Biggers { 970*06533002SEric Biggers struct cqhci_slot *slot = &cq_host->slot[tag]; 971*06533002SEric Biggers struct mmc_request *mrq = slot->mrq; 972*06533002SEric Biggers struct mmc_data *data; 973*06533002SEric Biggers 974*06533002SEric Biggers if (!mrq) 975*06533002SEric Biggers return; 976*06533002SEric Biggers 977*06533002SEric Biggers slot->mrq 
= NULL; 978*06533002SEric Biggers 979*06533002SEric Biggers cq_host->qcnt -= 1; 980*06533002SEric Biggers 981*06533002SEric Biggers data = mrq->data; 982*06533002SEric Biggers if (data) { 983*06533002SEric Biggers data->bytes_xfered = 0; 984*06533002SEric Biggers data->error = cqhci_error_from_flags(slot->flags); 985*06533002SEric Biggers } else { 986*06533002SEric Biggers mrq->cmd->error = cqhci_error_from_flags(slot->flags); 987*06533002SEric Biggers } 988*06533002SEric Biggers 989*06533002SEric Biggers mmc_cqe_request_done(cq_host->mmc, mrq); 990*06533002SEric Biggers } 991*06533002SEric Biggers 992*06533002SEric Biggers static void cqhci_recover_mrqs(struct cqhci_host *cq_host) 993*06533002SEric Biggers { 994*06533002SEric Biggers int i; 995*06533002SEric Biggers 996*06533002SEric Biggers for (i = 0; i < cq_host->num_slots; i++) 997*06533002SEric Biggers cqhci_recover_mrq(cq_host, i); 998*06533002SEric Biggers } 999*06533002SEric Biggers 1000*06533002SEric Biggers /* 1001*06533002SEric Biggers * By now the command and data lines should be unused so there is no reason for 1002*06533002SEric Biggers * CQHCI to take a long time to halt, but if it doesn't halt there could be 1003*06533002SEric Biggers * problems clearing tasks, so be generous. 
1004*06533002SEric Biggers */ 1005*06533002SEric Biggers #define CQHCI_FINISH_HALT_TIMEOUT 20 1006*06533002SEric Biggers 1007*06533002SEric Biggers /* CQHCI could be expected to clear it's internal state pretty quickly */ 1008*06533002SEric Biggers #define CQHCI_CLEAR_TIMEOUT 20 1009*06533002SEric Biggers 1010*06533002SEric Biggers static void cqhci_recovery_finish(struct mmc_host *mmc) 1011*06533002SEric Biggers { 1012*06533002SEric Biggers struct cqhci_host *cq_host = mmc->cqe_private; 1013*06533002SEric Biggers unsigned long flags; 1014*06533002SEric Biggers u32 cqcfg; 1015*06533002SEric Biggers bool ok; 1016*06533002SEric Biggers 1017*06533002SEric Biggers pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); 1018*06533002SEric Biggers 1019*06533002SEric Biggers WARN_ON(!cq_host->recovery_halt); 1020*06533002SEric Biggers 1021*06533002SEric Biggers ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); 1022*06533002SEric Biggers 1023*06533002SEric Biggers if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) 1024*06533002SEric Biggers ok = false; 1025*06533002SEric Biggers 1026*06533002SEric Biggers /* 1027*06533002SEric Biggers * The specification contradicts itself, by saying that tasks cannot be 1028*06533002SEric Biggers * cleared if CQHCI does not halt, but if CQHCI does not halt, it should 1029*06533002SEric Biggers * be disabled/re-enabled, but not to disable before clearing tasks. 1030*06533002SEric Biggers * Have a go anyway. 
1031*06533002SEric Biggers */ 1032*06533002SEric Biggers if (!ok) { 1033*06533002SEric Biggers pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc)); 1034*06533002SEric Biggers cqcfg = cqhci_readl(cq_host, CQHCI_CFG); 1035*06533002SEric Biggers cqcfg &= ~CQHCI_ENABLE; 1036*06533002SEric Biggers cqhci_writel(cq_host, cqcfg, CQHCI_CFG); 1037*06533002SEric Biggers cqcfg |= CQHCI_ENABLE; 1038*06533002SEric Biggers cqhci_writel(cq_host, cqcfg, CQHCI_CFG); 1039*06533002SEric Biggers /* Be sure that there are no tasks */ 1040*06533002SEric Biggers ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); 1041*06533002SEric Biggers if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) 1042*06533002SEric Biggers ok = false; 1043*06533002SEric Biggers WARN_ON(!ok); 1044*06533002SEric Biggers } 1045*06533002SEric Biggers 1046*06533002SEric Biggers cqhci_recover_mrqs(cq_host); 1047*06533002SEric Biggers 1048*06533002SEric Biggers WARN_ON(cq_host->qcnt); 1049*06533002SEric Biggers 1050*06533002SEric Biggers spin_lock_irqsave(&cq_host->lock, flags); 1051*06533002SEric Biggers cq_host->qcnt = 0; 1052*06533002SEric Biggers cq_host->recovery_halt = false; 1053*06533002SEric Biggers mmc->cqe_on = false; 1054*06533002SEric Biggers spin_unlock_irqrestore(&cq_host->lock, flags); 1055*06533002SEric Biggers 1056*06533002SEric Biggers /* Ensure all writes are done before interrupts are re-enabled */ 1057*06533002SEric Biggers wmb(); 1058*06533002SEric Biggers 1059*06533002SEric Biggers cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS); 1060*06533002SEric Biggers 1061*06533002SEric Biggers cqhci_set_irqs(cq_host, CQHCI_IS_MASK); 1062*06533002SEric Biggers 1063*06533002SEric Biggers pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc)); 1064*06533002SEric Biggers } 1065*06533002SEric Biggers 1066*06533002SEric Biggers static const struct mmc_cqe_ops cqhci_cqe_ops = { 1067*06533002SEric Biggers .cqe_enable = cqhci_enable, 1068*06533002SEric Biggers .cqe_disable = 
cqhci_disable, 1069*06533002SEric Biggers .cqe_request = cqhci_request, 1070*06533002SEric Biggers .cqe_post_req = cqhci_post_req, 1071*06533002SEric Biggers .cqe_off = cqhci_off, 1072*06533002SEric Biggers .cqe_wait_for_idle = cqhci_wait_for_idle, 1073*06533002SEric Biggers .cqe_timeout = cqhci_timeout, 1074*06533002SEric Biggers .cqe_recovery_start = cqhci_recovery_start, 1075*06533002SEric Biggers .cqe_recovery_finish = cqhci_recovery_finish, 1076*06533002SEric Biggers }; 1077*06533002SEric Biggers 1078*06533002SEric Biggers struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev) 1079*06533002SEric Biggers { 1080*06533002SEric Biggers struct cqhci_host *cq_host; 1081*06533002SEric Biggers struct resource *cqhci_memres = NULL; 1082*06533002SEric Biggers 1083*06533002SEric Biggers /* check and setup CMDQ interface */ 1084*06533002SEric Biggers cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM, 1085*06533002SEric Biggers "cqhci"); 1086*06533002SEric Biggers if (!cqhci_memres) { 1087*06533002SEric Biggers dev_dbg(&pdev->dev, "CMDQ not supported\n"); 1088*06533002SEric Biggers return ERR_PTR(-EINVAL); 1089*06533002SEric Biggers } 1090*06533002SEric Biggers 1091*06533002SEric Biggers cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); 1092*06533002SEric Biggers if (!cq_host) 1093*06533002SEric Biggers return ERR_PTR(-ENOMEM); 1094*06533002SEric Biggers cq_host->mmio = devm_ioremap(&pdev->dev, 1095*06533002SEric Biggers cqhci_memres->start, 1096*06533002SEric Biggers resource_size(cqhci_memres)); 1097*06533002SEric Biggers if (!cq_host->mmio) { 1098*06533002SEric Biggers dev_err(&pdev->dev, "failed to remap cqhci regs\n"); 1099*06533002SEric Biggers return ERR_PTR(-EBUSY); 1100*06533002SEric Biggers } 1101*06533002SEric Biggers dev_dbg(&pdev->dev, "CMDQ ioremap: done\n"); 1102*06533002SEric Biggers 1103*06533002SEric Biggers return cq_host; 1104*06533002SEric Biggers } 1105*06533002SEric Biggers EXPORT_SYMBOL(cqhci_pltfm_init); 
1106*06533002SEric Biggers 1107*06533002SEric Biggers static unsigned int cqhci_ver_major(struct cqhci_host *cq_host) 1108*06533002SEric Biggers { 1109*06533002SEric Biggers return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER)); 1110*06533002SEric Biggers } 1111*06533002SEric Biggers 1112*06533002SEric Biggers static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host) 1113*06533002SEric Biggers { 1114*06533002SEric Biggers u32 ver = cqhci_readl(cq_host, CQHCI_VER); 1115*06533002SEric Biggers 1116*06533002SEric Biggers return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver); 1117*06533002SEric Biggers } 1118*06533002SEric Biggers 1119*06533002SEric Biggers int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, 1120*06533002SEric Biggers bool dma64) 1121*06533002SEric Biggers { 1122*06533002SEric Biggers int err; 1123*06533002SEric Biggers 1124*06533002SEric Biggers cq_host->dma64 = dma64; 1125*06533002SEric Biggers cq_host->mmc = mmc; 1126*06533002SEric Biggers cq_host->mmc->cqe_private = cq_host; 1127*06533002SEric Biggers 1128*06533002SEric Biggers cq_host->num_slots = NUM_SLOTS; 1129*06533002SEric Biggers cq_host->dcmd_slot = DCMD_SLOT; 1130*06533002SEric Biggers 1131*06533002SEric Biggers mmc->cqe_ops = &cqhci_cqe_ops; 1132*06533002SEric Biggers 1133*06533002SEric Biggers mmc->cqe_qdepth = NUM_SLOTS; 1134*06533002SEric Biggers if (mmc->caps2 & MMC_CAP2_CQE_DCMD) 1135*06533002SEric Biggers mmc->cqe_qdepth -= 1; 1136*06533002SEric Biggers 1137*06533002SEric Biggers cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots, 1138*06533002SEric Biggers sizeof(*cq_host->slot), GFP_KERNEL); 1139*06533002SEric Biggers if (!cq_host->slot) { 1140*06533002SEric Biggers err = -ENOMEM; 1141*06533002SEric Biggers goto out_err; 1142*06533002SEric Biggers } 1143*06533002SEric Biggers 1144*06533002SEric Biggers spin_lock_init(&cq_host->lock); 1145*06533002SEric Biggers 1146*06533002SEric Biggers init_completion(&cq_host->halt_comp); 1147*06533002SEric 
Biggers init_waitqueue_head(&cq_host->wait_queue); 1148*06533002SEric Biggers 1149*06533002SEric Biggers pr_info("%s: CQHCI version %u.%02u\n", 1150*06533002SEric Biggers mmc_hostname(mmc), cqhci_ver_major(cq_host), 1151*06533002SEric Biggers cqhci_ver_minor(cq_host)); 1152*06533002SEric Biggers 1153*06533002SEric Biggers return 0; 1154*06533002SEric Biggers 1155*06533002SEric Biggers out_err: 1156*06533002SEric Biggers pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n", 1157*06533002SEric Biggers mmc_hostname(mmc), cqhci_ver_major(cq_host), 1158*06533002SEric Biggers cqhci_ver_minor(cq_host), err); 1159*06533002SEric Biggers return err; 1160*06533002SEric Biggers } 1161*06533002SEric Biggers EXPORT_SYMBOL(cqhci_init); 1162*06533002SEric Biggers 1163*06533002SEric Biggers MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>"); 1164*06533002SEric Biggers MODULE_DESCRIPTION("Command Queue Host Controller Interface driver"); 1165*06533002SEric Biggers MODULE_LICENSE("GPL v2"); 1166