// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}
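
/*
 * Note (per the CQE register model): CQHCI_ISTE is the interrupt status
 * enable register and CQHCI_ISGE the interrupt signal enable register.
 * This driver always programs the two with the same mask, so an event is
 * either latched and signalled together, or masked entirely.
 */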
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
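
/*
 * Worked example (illustrative): with 64-bit DMA and 128-bit task
 * descriptors, slot_sz = 16 (task) + 16 (link) = 32 bytes, so the task
 * descriptor list for 32 slots occupies 1 KiB. The transfer descriptor
 * pool is sized separately as trans_desc_len * max_segs * cqe_qdepth,
 * giving every slot room to describe a worst-case scatterlist.
 */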
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}
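
/*
 * Bring the CQE out of its quiesced state. The configuration register may
 * only be changed while CQHCI_ENABLE is clear, so the sequence is: clear
 * the enable bit, program DCMD support, task descriptor size, crypto and
 * the descriptor list base address, then set CQHCI_ENABLE and finally
 * unmask interrupts.
 */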
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	if (mmc->caps2 & MMC_CAP2_CRYPTO)
		cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}
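
/*
 * Build the 64-bit "task parameters" word of a task descriptor: an
 * ACT=0x5 (task) entry carrying the data direction, priority, QBAR,
 * reliable-write, forced-programming and data-tag flags plus the block
 * count and block address. With 128-bit descriptors the second word
 * holds the crypto parameters from cqhci_crypto_prep_task_desc().
 */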
static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 struct cqhci_host *cq_host, int tag)
{
	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
	u32 req_flags = mrq->data->flags;
	u64 desc0;

	desc0 = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(1) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	task_desc[0] = cpu_to_le64(desc0);

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		u64 desc1 = cqhci_crypto_prep_task_desc(mrq);

		task_desc[1] = cpu_to_le64(desc1);

		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
	} else {
		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc0);
	}
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i + 1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}
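
/*
 * Direct commands (DCMD) reuse the task descriptor format with ACT=0x5
 * and QBAR set, but encode the command index, response type and timing
 * instead of a data transfer: resp_type is 0 for no response, 3 for R1b
 * and 2 otherwise; the command argument is written at byte offset 4.
 */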
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		cqhci_prep_task_desc(mrq, cq_host, tag);

		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}
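
/*
 * The first error on the queue switches the CQE into recovery mode:
 * recovery_halt is set once, waiters on wait_queue are woken, and, when
 * requested, the issuer is notified via mrq->recovery_notifier. Further
 * errors and completions are then parked until recovery finishes.
 */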
static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}
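
/*
 * Error interrupt handling: CQHCI_TERRI identifies the task that saw a
 * command or data error, and that task's slot is flagged accordingly.
 * If the hardware does not point at any task (and no crypto error is
 * pending), one outstanding task is marked anyway so that recovery is
 * guaranteed to make forward progress.
 */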
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	u32 tdpe;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	/*
	 * Handle ICCE ("Invalid Crypto Configuration Error"). This should
	 * never happen, since the block layer ensures that all crypto-enabled
	 * I/O requests have a valid keyslot before they reach the driver.
	 *
	 * Note that GCE ("General Crypto Error") is different; it already got
	 * handled above by checking TERRI.
	 */
	if (status & CQHCI_IS_ICCE) {
		tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
		WARN_ONCE(1,
			  "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
			  mmc_hostname(mmc), status, tdpe);
		while (tdpe != 0) {
			tag = __ffs(tdpe);
			tdpe &= ~(1 << tag);
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
	    cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
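
/*
 * cqhci_irq() is meant to be called from the host controller driver's own
 * interrupt handler while the CQE is active. An sdhci-based glue driver
 * typically does something along the lines of (illustrative sketch only,
 * names vary per driver):
 *
 *	if (host->mmc->cqe_on)
 *		return cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 * passing any command/data error it has already decoded from its own
 * interrupt status.
 */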
static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}
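
/*
 * Recovery sequence, roughly as driven by the mmc core: cqe_timeout()
 * flags the stuck request and asks for recovery, cqe_recovery_start()
 * halts the CQE, the core then recovers the card itself, and finally
 * cqe_recovery_finish() re-halts if needed, clears all tasks and
 * completes every parked request with an error derived from its slot
 * flags.
 */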

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT 5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT 20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT 20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared unless CQHCI is halted, but also that a CQHCI which fails
	 * to halt should be disabled and re-enabled, while warning against
	 * disabling it before tasks are cleared. Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}
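
/*
 * Typical use by a platform glue driver (illustrative sketch, not taken
 * from any specific driver): map the "cqhci" register resource, hook up
 * host-specific callbacks and then hand the result to cqhci_init():
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 *	cq_host->ops = &my_cqhci_host_ops;
 *	err = cqhci_init(cq_host, mmc, my_host_supports_64bit_dma);
 *
 * where my_cqhci_host_ops and my_host_supports_64bit_dma stand in for the
 * glue driver's own definitions.
 */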
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	err = cqhci_crypto_init(cq_host);
	if (err) {
		pr_err("%s: CQHCI crypto initialization failed\n",
		       mmc_hostname(mmc));
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");