// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
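
/*
 * Worked sizing example (illustrative values only, not requirements of this
 * driver): with 64-bit DMA and 64-bit task descriptors, task_desc_len = 8
 * and link_desc_len = 16, so slot_sz = 24 and the 32-slot descriptor list
 * below is 24 * 32 = 768 bytes. Assuming a host with max_segs = 128 and
 * DCMD enabled (cqe_qdepth = 31), the transfer descriptor area is
 * 16 * 128 * 31 = 63488 bytes, and the transfer descriptors for tag N start
 * at byte offset N * 128 * 16 within it.
 */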
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 struct cqhci_host *cq_host, int tag)
{
	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
	u32 req_flags = mrq->data->flags;
	u64 desc0;

	desc0 = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(1) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	task_desc[0] = cpu_to_le64(desc0);

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		u64 desc1 = 0;

		task_desc[1] = cpu_to_le64(desc1);

		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
	} else {
		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc0);
	}
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);

}
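
/*
 * Illustrative example (not mandated by this driver): a cache-flush DCMD is
 * a SWITCH command (CMD6) with an R1b response, so the code above selects
 * resp_type 0x3 and timing 0x0 (wait for busy), sets QBAR so queued tasks
 * are held off, and places the descriptor in the dedicated DCMD slot 31.
 */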
static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		cqhci_prep_task_desc(mrq, cq_host, tag);

		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
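
/*
 * Usage sketch (illustrative, not part of this file): host controller glue
 * is expected to forward its interrupt to cqhci_irq() while the CQE is
 * active, passing along any command/data errors it has already decoded,
 * roughly:
 *
 *	if (host->mmc->cqe_on)
 *		return cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 * "host" stands for the host driver's own context; how intmask, cmd_error
 * and data_error are derived is host-specific.
 */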
static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); 935 936 return ret; 937 } 938 939 /* 940 * After halting we expect to be able to use the command line. We interpret the 941 * failure to halt to mean the data lines might still be in use (and the upper 942 * layers will need to send a STOP command), so we set the timeout based on a 943 * generous command timeout. 944 */ 945 #define CQHCI_START_HALT_TIMEOUT 5 946 947 static void cqhci_recovery_start(struct mmc_host *mmc) 948 { 949 struct cqhci_host *cq_host = mmc->cqe_private; 950 951 pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); 952 953 WARN_ON(!cq_host->recovery_halt); 954 955 cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT); 956 957 if (cq_host->ops->disable) 958 cq_host->ops->disable(mmc, true); 959 960 mmc->cqe_on = false; 961 } 962 963 static int cqhci_error_from_flags(unsigned int flags) 964 { 965 if (!flags) 966 return 0; 967 968 /* CRC errors might indicate re-tuning so prefer to report that */ 969 if (flags & CQHCI_HOST_CRC) 970 return -EILSEQ; 971 972 if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT)) 973 return -ETIMEDOUT; 974 975 return -EIO; 976 } 977 978 static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag) 979 { 980 struct cqhci_slot *slot = &cq_host->slot[tag]; 981 struct mmc_request *mrq = slot->mrq; 982 struct mmc_data *data; 983 984 if (!mrq) 985 return; 986 987 slot->mrq = NULL; 988 989 cq_host->qcnt -= 1; 990 991 data = mrq->data; 992 if (data) { 993 data->bytes_xfered = 0; 994 data->error = cqhci_error_from_flags(slot->flags); 995 } else { 996 mrq->cmd->error = cqhci_error_from_flags(slot->flags); 997 } 998 999 mmc_cqe_request_done(cq_host->mmc, mrq); 1000 } 1001 1002 static void cqhci_recover_mrqs(struct cqhci_host *cq_host) 1003 { 1004 int i; 1005 1006 for (i = 0; i < cq_host->num_slots; i++) 1007 cqhci_recover_mrq(cq_host, i); 1008 } 1009 1010 /* 1011 * By now the command and data lines should be unused so there is no reason for 1012 * CQHCI to take a long time to halt, but if it doesn't halt there could be 1013 * problems clearing tasks, so be generous. 1014 */ 1015 #define CQHCI_FINISH_HALT_TIMEOUT 20 1016 1017 /* CQHCI could be expected to clear it's internal state pretty quickly */ 1018 #define CQHCI_CLEAR_TIMEOUT 20 1019 1020 static void cqhci_recovery_finish(struct mmc_host *mmc) 1021 { 1022 struct cqhci_host *cq_host = mmc->cqe_private; 1023 unsigned long flags; 1024 u32 cqcfg; 1025 bool ok; 1026 1027 pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); 1028 1029 WARN_ON(!cq_host->recovery_halt); 1030 1031 ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); 1032 1033 if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) 1034 ok = false; 1035 1036 /* 1037 * The specification contradicts itself, by saying that tasks cannot be 1038 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should 1039 * be disabled/re-enabled, but not to disable before clearing tasks. 1040 * Have a go anyway. 
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);
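
/*
 * Usage sketch (illustrative, hypothetical names): a platform host driver
 * typically wires CQHCI up in its probe path roughly as follows, providing
 * a "cqhci" memory resource and choosing dma64 from its own DMA mask:
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 *	cq_host->ops = &my_cqhci_host_ops;
 *	err = cqhci_init(cq_host, mmc, dma64);
 *
 * my_cqhci_host_ops and dma64 are placeholders for host-specific values.
 */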
static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");