151c5d844SKevin Hilman /* 251c5d844SKevin Hilman * Amlogic SD/eMMC driver for the GX/S905 family SoCs 351c5d844SKevin Hilman * 451c5d844SKevin Hilman * Copyright (c) 2016 BayLibre, SAS. 551c5d844SKevin Hilman * Author: Kevin Hilman <khilman@baylibre.com> 651c5d844SKevin Hilman * 751c5d844SKevin Hilman * This program is free software; you can redistribute it and/or modify 851c5d844SKevin Hilman * it under the terms of version 2 of the GNU General Public License as 951c5d844SKevin Hilman * published by the Free Software Foundation. 1051c5d844SKevin Hilman * 1151c5d844SKevin Hilman * This program is distributed in the hope that it will be useful, but 1251c5d844SKevin Hilman * WITHOUT ANY WARRANTY; without even the implied warranty of 1351c5d844SKevin Hilman * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1451c5d844SKevin Hilman * General Public License for more details. 1551c5d844SKevin Hilman * 1651c5d844SKevin Hilman * You should have received a copy of the GNU General Public License 1751c5d844SKevin Hilman * along with this program; if not, see <http://www.gnu.org/licenses/>. 1851c5d844SKevin Hilman * The full GNU General Public License is included in this distribution 1951c5d844SKevin Hilman * in the file called COPYING. 
2051c5d844SKevin Hilman */ 2151c5d844SKevin Hilman #include <linux/kernel.h> 2251c5d844SKevin Hilman #include <linux/module.h> 2351c5d844SKevin Hilman #include <linux/init.h> 2418f92bc0SJerome Brunet #include <linux/delay.h> 2551c5d844SKevin Hilman #include <linux/device.h> 2698849da6SJerome Brunet #include <linux/iopoll.h> 2751c5d844SKevin Hilman #include <linux/of_device.h> 2851c5d844SKevin Hilman #include <linux/platform_device.h> 2951c5d844SKevin Hilman #include <linux/ioport.h> 3051c5d844SKevin Hilman #include <linux/dma-mapping.h> 3151c5d844SKevin Hilman #include <linux/mmc/host.h> 3251c5d844SKevin Hilman #include <linux/mmc/mmc.h> 3351c5d844SKevin Hilman #include <linux/mmc/sdio.h> 3451c5d844SKevin Hilman #include <linux/mmc/slot-gpio.h> 3551c5d844SKevin Hilman #include <linux/io.h> 3651c5d844SKevin Hilman #include <linux/clk.h> 3751c5d844SKevin Hilman #include <linux/clk-provider.h> 3851c5d844SKevin Hilman #include <linux/regulator/consumer.h> 3919c6beaaSJerome Brunet #include <linux/reset.h> 40b8789ec4SUlf Hansson #include <linux/interrupt.h> 411231e7ebSHeiner Kallweit #include <linux/bitfield.h> 428fb572acSThierry Reding #include <linux/pinctrl/consumer.h> 4351c5d844SKevin Hilman 4451c5d844SKevin Hilman #define DRIVER_NAME "meson-gx-mmc" 4551c5d844SKevin Hilman 4651c5d844SKevin Hilman #define SD_EMMC_CLOCK 0x0 471231e7ebSHeiner Kallweit #define CLK_DIV_MASK GENMASK(5, 0) 481231e7ebSHeiner Kallweit #define CLK_SRC_MASK GENMASK(7, 6) 491231e7ebSHeiner Kallweit #define CLK_CORE_PHASE_MASK GENMASK(9, 8) 50c08bcb6cSHeiner Kallweit #define CLK_TX_PHASE_MASK GENMASK(11, 10) 51c08bcb6cSHeiner Kallweit #define CLK_RX_PHASE_MASK GENMASK(13, 12) 52df069815SNan Li #define CLK_V2_TX_DELAY_MASK GENMASK(19, 16) 53df069815SNan Li #define CLK_V2_RX_DELAY_MASK GENMASK(23, 20) 54df069815SNan Li #define CLK_V2_ALWAYS_ON BIT(24) 55df069815SNan Li 56df069815SNan Li #define CLK_V3_TX_DELAY_MASK GENMASK(21, 16) 57df069815SNan Li #define CLK_V3_RX_DELAY_MASK GENMASK(27, 22) 
58df069815SNan Li #define CLK_V3_ALWAYS_ON BIT(28) 59df069815SNan Li 60033d7168SJerome Brunet #define CLK_DELAY_STEP_PS 200 61d341ca88SJerome Brunet #define CLK_PHASE_STEP 30 62d341ca88SJerome Brunet #define CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP) 63df069815SNan Li 64df069815SNan Li #define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask) 65df069815SNan Li #define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask) 66df069815SNan Li #define CLK_ALWAYS_ON(h) (h->data->always_on) 6751c5d844SKevin Hilman 6852899b99SJerome Brunet #define SD_EMMC_DELAY 0x4 6951c5d844SKevin Hilman #define SD_EMMC_ADJUST 0x8 7071645e65SJerome Brunet #define ADJUST_ADJ_DELAY_MASK GENMASK(21, 16) 7171645e65SJerome Brunet #define ADJUST_DS_EN BIT(15) 7271645e65SJerome Brunet #define ADJUST_ADJ_EN BIT(13) 73df069815SNan Li 74df069815SNan Li #define SD_EMMC_DELAY1 0x4 75df069815SNan Li #define SD_EMMC_DELAY2 0x8 76df069815SNan Li #define SD_EMMC_V3_ADJUST 0xc 77df069815SNan Li 7851c5d844SKevin Hilman #define SD_EMMC_CALOUT 0x10 7951c5d844SKevin Hilman #define SD_EMMC_START 0x40 8051c5d844SKevin Hilman #define START_DESC_INIT BIT(0) 8151c5d844SKevin Hilman #define START_DESC_BUSY BIT(1) 821231e7ebSHeiner Kallweit #define START_DESC_ADDR_MASK GENMASK(31, 2) 8351c5d844SKevin Hilman 8451c5d844SKevin Hilman #define SD_EMMC_CFG 0x44 851231e7ebSHeiner Kallweit #define CFG_BUS_WIDTH_MASK GENMASK(1, 0) 8651c5d844SKevin Hilman #define CFG_BUS_WIDTH_1 0x0 8751c5d844SKevin Hilman #define CFG_BUS_WIDTH_4 0x1 8851c5d844SKevin Hilman #define CFG_BUS_WIDTH_8 0x2 8951c5d844SKevin Hilman #define CFG_DDR BIT(2) 901231e7ebSHeiner Kallweit #define CFG_BLK_LEN_MASK GENMASK(7, 4) 911231e7ebSHeiner Kallweit #define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8) 921231e7ebSHeiner Kallweit #define CFG_RC_CC_MASK GENMASK(15, 12) 9351c5d844SKevin Hilman #define CFG_STOP_CLOCK BIT(22) 9451c5d844SKevin Hilman #define CFG_CLK_ALWAYS_ON BIT(18) 95e21e6fddSHeiner Kallweit #define CFG_CHK_DS BIT(20) 9651c5d844SKevin Hilman #define 
CFG_AUTO_CLK BIT(23) 9718f92bc0SJerome Brunet #define CFG_ERR_ABORT BIT(27) 9851c5d844SKevin Hilman 9951c5d844SKevin Hilman #define SD_EMMC_STATUS 0x48 10051c5d844SKevin Hilman #define STATUS_BUSY BIT(31) 10118f92bc0SJerome Brunet #define STATUS_DESC_BUSY BIT(30) 102186cd8b7SJerome Brunet #define STATUS_DATI GENMASK(23, 16) 10351c5d844SKevin Hilman 10451c5d844SKevin Hilman #define SD_EMMC_IRQ_EN 0x4c 1051231e7ebSHeiner Kallweit #define IRQ_RXD_ERR_MASK GENMASK(7, 0) 10651c5d844SKevin Hilman #define IRQ_TXD_ERR BIT(8) 10751c5d844SKevin Hilman #define IRQ_DESC_ERR BIT(9) 10851c5d844SKevin Hilman #define IRQ_RESP_ERR BIT(10) 10974858655SJerome Brunet #define IRQ_CRC_ERR \ 11074858655SJerome Brunet (IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR) 11151c5d844SKevin Hilman #define IRQ_RESP_TIMEOUT BIT(11) 11251c5d844SKevin Hilman #define IRQ_DESC_TIMEOUT BIT(12) 11374858655SJerome Brunet #define IRQ_TIMEOUTS \ 11474858655SJerome Brunet (IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT) 11551c5d844SKevin Hilman #define IRQ_END_OF_CHAIN BIT(13) 11651c5d844SKevin Hilman #define IRQ_RESP_STATUS BIT(14) 11751c5d844SKevin Hilman #define IRQ_SDIO BIT(15) 11874858655SJerome Brunet #define IRQ_EN_MASK \ 11974858655SJerome Brunet (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\ 12074858655SJerome Brunet IRQ_SDIO) 12151c5d844SKevin Hilman 12251c5d844SKevin Hilman #define SD_EMMC_CMD_CFG 0x50 12351c5d844SKevin Hilman #define SD_EMMC_CMD_ARG 0x54 12451c5d844SKevin Hilman #define SD_EMMC_CMD_DAT 0x58 12551c5d844SKevin Hilman #define SD_EMMC_CMD_RSP 0x5c 12651c5d844SKevin Hilman #define SD_EMMC_CMD_RSP1 0x60 12751c5d844SKevin Hilman #define SD_EMMC_CMD_RSP2 0x64 12851c5d844SKevin Hilman #define SD_EMMC_CMD_RSP3 0x68 12951c5d844SKevin Hilman 13051c5d844SKevin Hilman #define SD_EMMC_RXD 0x94 13151c5d844SKevin Hilman #define SD_EMMC_TXD 0x94 13251c5d844SKevin Hilman #define SD_EMMC_LAST_REG SD_EMMC_TXD 13351c5d844SKevin Hilman 13451c5d844SKevin Hilman #define 
#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

/* host_cookie flags tracking per-request DMA preparation state */
#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

/* Per-SoC register layout differences (GX vs later/V3 controllers) */
struct meson_mmc_data {
	unsigned int tx_delay_mask;
	unsigned int rx_delay_mask;
	unsigned int always_on;
	unsigned int adjust;
};

/* One hardware DMA descriptor, laid out exactly as the controller reads it */
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

/* Driver-private state for one SD/eMMC controller instance */
struct meson_host {
	struct device		*dev;
	struct meson_mmc_data *data;
	struct mmc_host	*mmc;
	struct mmc_command *cmd;	/* command currently in flight */

	void __iomem *regs;
	struct clk *core_clk;
	struct clk *mmc_clk;		/* composite mux+div+phase clock */
	struct clk *rx_clk;		/* rx sampling phase clock */
	struct clk *tx_clk;		/* tx launch phase clock */
	unsigned long req_rate;		/* last rate requested, to skip no-ops */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_clk_gate;	/* optional pad-level clock gate */

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	int irq;

	bool vqmmc_enabled;
};

/* CMD_CFG descriptor word layout */
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

/*
 * Virtual clock modelling one phase-adjustable point of the SD_EMMC_CLOCK
 * register through the common clock framework. delay_mask may be 0 when
 * the point has no fine-delay field.
 */
struct meson_mmc_phase {
	struct clk_hw hw;
	void __iomem *reg;
	unsigned long phase_mask;
	unsigned long delay_mask;
	unsigned int delay_step_ps;	/* picoseconds per delay step */
};
#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw)

/*
 * Report the currently programmed phase of this virtual clock, in degrees.
 * The coarse phase field and, when present, the fine delay field are both
 * folded into the result.
 */
static int meson_mmc_clk_get_phase(struct clk_hw *hw)
{
	struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
	unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
	unsigned long period_ps, p, d;
	int degrees;
	u32 val;

	val = readl(mmc->reg);
	p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask);
	degrees = p * 360 / phase_num;

	if (mmc->delay_mask) {
		/* clock period in picoseconds, rounded up */
		period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
					 clk_get_rate(hw->clk));
		d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask);
		degrees += d * mmc->delay_step_ps * 360 / period_ps;
		degrees %= 360;
	}

	return degrees;
}

/* Program raw phase and delay field values into the clock register */
static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc,
					unsigned int phase,
					unsigned int delay)
{
	u32 val;

	val = readl(mmc->reg);
	val &= ~mmc->phase_mask;
	val |= phase << __ffs(mmc->phase_mask);

	if (mmc->delay_mask) {
		val &= ~mmc->delay_mask;
		val |= delay << __ffs(mmc->delay_mask);
	}

	writel(val, mmc->reg);
}

/*
 * Set the clock phase as close as possible to the requested number of
 * degrees: the coarse phase field covers most of it, and the fine delay
 * steps (when available) approximate the remainder.
 */
static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees)
{
	struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
	unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
	unsigned long period_ps, d = 0, r;
	uint64_t p;

	p = degrees % 360;

	if (!mmc->delay_mask) {
		p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num);
	} else {
		period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
					 clk_get_rate(hw->clk));

		/* First compute the phase index (p), the remainder (r) is the
		 * part we'll try to achieve using the delays (d).
		 */
		r = do_div(p, 360 / phase_num);
		d = DIV_ROUND_CLOSEST(r * period_ps,
				      360 * mmc->delay_step_ps);
		/* clamp to the largest delay the field can encode */
		d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask));
	}

	meson_mmc_apply_phase_delay(mmc, p, d);
	return 0;
}

static const struct clk_ops meson_mmc_clk_phase_ops = {
	.get_phase = meson_mmc_clk_get_phase,
	.set_phase = meson_mmc_clk_set_phase,
};
2^15 ms */ 3054eee86c3SHeiner Kallweit } 3064eee86c3SHeiner Kallweit 307e5e4a3ebSHeiner Kallweit static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd) 308e5e4a3ebSHeiner Kallweit { 309e5e4a3ebSHeiner Kallweit if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) 310e5e4a3ebSHeiner Kallweit return cmd->mrq->cmd; 311e5e4a3ebSHeiner Kallweit else if (mmc_op_multi(cmd->opcode) && 312e5e4a3ebSHeiner Kallweit (!cmd->mrq->sbc || cmd->error || cmd->data->error)) 313e5e4a3ebSHeiner Kallweit return cmd->mrq->stop; 314e5e4a3ebSHeiner Kallweit else 315e5e4a3ebSHeiner Kallweit return NULL; 316e5e4a3ebSHeiner Kallweit } 317e5e4a3ebSHeiner Kallweit 31879ed05e3SHeiner Kallweit static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, 31979ed05e3SHeiner Kallweit struct mmc_request *mrq) 32079ed05e3SHeiner Kallweit { 32179ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 32279ed05e3SHeiner Kallweit struct scatterlist *sg; 32379ed05e3SHeiner Kallweit int i; 32479ed05e3SHeiner Kallweit bool use_desc_chain_mode = true; 32579ed05e3SHeiner Kallweit 32624835611SHeiner Kallweit /* 32724835611SHeiner Kallweit * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been 32824835611SHeiner Kallweit * reported. For some strange reason this occurs in descriptor 32924835611SHeiner Kallweit * chain mode only. So let's fall back to bounce buffer mode 33024835611SHeiner Kallweit * for command SD_IO_RW_EXTENDED. 
33124835611SHeiner Kallweit */ 33224835611SHeiner Kallweit if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) 33324835611SHeiner Kallweit return; 33424835611SHeiner Kallweit 33579ed05e3SHeiner Kallweit for_each_sg(data->sg, sg, data->sg_len, i) 33679ed05e3SHeiner Kallweit /* check for 8 byte alignment */ 33779ed05e3SHeiner Kallweit if (sg->offset & 7) { 33879ed05e3SHeiner Kallweit WARN_ONCE(1, "unaligned scatterlist buffer\n"); 33979ed05e3SHeiner Kallweit use_desc_chain_mode = false; 34079ed05e3SHeiner Kallweit break; 34179ed05e3SHeiner Kallweit } 34279ed05e3SHeiner Kallweit 34379ed05e3SHeiner Kallweit if (use_desc_chain_mode) 34479ed05e3SHeiner Kallweit data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE; 34579ed05e3SHeiner Kallweit } 34679ed05e3SHeiner Kallweit 34779ed05e3SHeiner Kallweit static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data) 34879ed05e3SHeiner Kallweit { 34979ed05e3SHeiner Kallweit return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE; 35079ed05e3SHeiner Kallweit } 35179ed05e3SHeiner Kallweit 35279ed05e3SHeiner Kallweit static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data) 35379ed05e3SHeiner Kallweit { 35479ed05e3SHeiner Kallweit return data && data->flags & MMC_DATA_READ && 35579ed05e3SHeiner Kallweit !meson_mmc_desc_chain_mode(data); 35679ed05e3SHeiner Kallweit } 35779ed05e3SHeiner Kallweit 35879ed05e3SHeiner Kallweit static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 35979ed05e3SHeiner Kallweit { 36079ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 36179ed05e3SHeiner Kallweit 36279ed05e3SHeiner Kallweit if (!data) 36379ed05e3SHeiner Kallweit return; 36479ed05e3SHeiner Kallweit 36579ed05e3SHeiner Kallweit meson_mmc_get_transfer_mode(mmc, mrq); 36679ed05e3SHeiner Kallweit data->host_cookie |= SD_EMMC_PRE_REQ_DONE; 36779ed05e3SHeiner Kallweit 36879ed05e3SHeiner Kallweit if (!meson_mmc_desc_chain_mode(data)) 36979ed05e3SHeiner Kallweit return; 37079ed05e3SHeiner Kallweit 37179ed05e3SHeiner 
Kallweit data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, 37279ed05e3SHeiner Kallweit mmc_get_dma_dir(data)); 37379ed05e3SHeiner Kallweit if (!data->sg_count) 37479ed05e3SHeiner Kallweit dev_err(mmc_dev(mmc), "dma_map_sg failed"); 37579ed05e3SHeiner Kallweit } 37679ed05e3SHeiner Kallweit 37779ed05e3SHeiner Kallweit static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 37879ed05e3SHeiner Kallweit int err) 37979ed05e3SHeiner Kallweit { 38079ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 38179ed05e3SHeiner Kallweit 38279ed05e3SHeiner Kallweit if (data && meson_mmc_desc_chain_mode(data) && data->sg_count) 38379ed05e3SHeiner Kallweit dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 38479ed05e3SHeiner Kallweit mmc_get_dma_dir(data)); 38579ed05e3SHeiner Kallweit } 38679ed05e3SHeiner Kallweit 387844c8a75SJerome Brunet static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios) 388844c8a75SJerome Brunet { 389844c8a75SJerome Brunet if (ios->timing == MMC_TIMING_MMC_DDR52 || 390844c8a75SJerome Brunet ios->timing == MMC_TIMING_UHS_DDR50 || 391844c8a75SJerome Brunet ios->timing == MMC_TIMING_MMC_HS400) 392844c8a75SJerome Brunet return true; 393844c8a75SJerome Brunet 394844c8a75SJerome Brunet return false; 395844c8a75SJerome Brunet } 396844c8a75SJerome Brunet 3971e03331dSJerome Brunet /* 3981e03331dSJerome Brunet * Gating the clock on this controller is tricky. It seems the mmc clock 3991e03331dSJerome Brunet * is also used by the controller. It may crash during some operation if the 4001e03331dSJerome Brunet * clock is stopped. The safest thing to do, whenever possible, is to keep 4011e03331dSJerome Brunet * clock running at stop it at the pad using the pinmux. 
/*
 * Gating the clock on this controller is tricky. It seems the mmc clock
 * is also used by the controller. It may crash during some operation if the
 * clock is stopped. The safest thing to do, whenever possible, is to keep
 * the clock running and stop it at the pad using the pinmux.
 */
static void meson_mmc_clk_gate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate) {
		pinctrl_select_state(host->pinctrl, host->pins_clk_gate);
	} else {
		/*
		 * If the pinmux is not provided - default to the classic and
		 * unsafe method
		 */
		cfg = readl(host->regs + SD_EMMC_CFG);
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}
}

/* Undo meson_mmc_clk_gate(): restore the pinmux and clear CFG_STOP_CLOCK */
static void meson_mmc_clk_ungate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate)
		pinctrl_select_state(host->pinctrl, host->pins_default);

	/* Make sure the clock is not stopped in the controller */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);
}

/*
 * Apply the clock rate requested in @ios. DDR timings run the module
 * clock at twice the bus rate. A requested rate of 0 leaves the clock
 * gated. Returns 0 on success or the clk_set_rate() error.
 */
static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long rate = ios->clock;
	int ret;
	u32 cfg;

	/* DDR modes require higher module clock */
	if (meson_mmc_timing_is_ddr(ios))
		rate <<= 1;

	/* Same request - bail-out */
	if (host->req_rate == rate)
		return 0;

	/* stop clock */
	meson_mmc_clk_gate(host);
	host->req_rate = 0;

	if (!rate) {
		mmc->actual_clock = 0;
		/* return with clock being stopped */
		return 0;
	}

	/* Stop the clock during rate change to avoid glitches */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg |= CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	ret = clk_set_rate(host->mmc_clk, rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			rate, ret);
		return ret;
	}

	host->req_rate = rate;
	mmc->actual_clock = clk_get_rate(host->mmc_clk);

	/* We should report the real output frequency of the controller */
	if (meson_mmc_timing_is_ddr(ios))
		mmc->actual_clock >>= 1;

	dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
	if (ios->clock != mmc->actual_clock)
		dev_dbg(host->dev, "requested rate was %u\n", ios->clock);

	/* (re)start clock */
	meson_mmc_clk_ungate(host);

	return 0;
}
/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	struct clk_mux *mux;
	struct clk_divider *div;
	struct meson_mmc_phase *core, *tx, *rx;
	struct clk *clk;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_parent[1];
	u32 clk_reg;

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= CLK_ALWAYS_ON(host);
	clk_reg |= CLK_DIV_MASK;
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			/* stay quiet on probe deferral; it is not an error */
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	mux->reg = host->regs + SD_EMMC_CLOCK;
	mux->shift = __ffs(CLK_SRC_MASK);
	mux->mask = CLK_SRC_MASK >> mux->shift;
	mux->hw.init = &init;

	clk = devm_clk_register(host->dev, &mux->hw);
	if (WARN_ON(IS_ERR(clk)))
		return PTR_ERR(clk);

	/* create the divider */
	div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(clk);	/* parent is the mux above */
	init.parent_names = clk_parent;
	init.num_parents = 1;

	div->reg = host->regs + SD_EMMC_CLOCK;
	div->shift = __ffs(CLK_DIV_MASK);
	div->width = __builtin_popcountl(CLK_DIV_MASK);
	div->hw.init = &init;
	div->flags = CLK_DIVIDER_ONE_BASED;

	clk = devm_clk_register(host->dev, &div->hw);
	if (WARN_ON(IS_ERR(clk)))
		return PTR_ERR(clk);

	/* create the mmc core clock */
	core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL);
	if (!core)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(clk);	/* parent is the divider */
	init.parent_names = clk_parent;
	init.num_parents = 1;

	core->reg = host->regs + SD_EMMC_CLOCK;
	core->phase_mask = CLK_CORE_PHASE_MASK;
	core->hw.init = &init;

	host->mmc_clk = devm_clk_register(host->dev, &core->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk)))
		return PTR_ERR(host->mmc_clk);

	/* create the mmc tx clock */
	tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL);
	if (!tx)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = 0;
	clk_parent[0] = __clk_get_name(host->mmc_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	tx->reg = host->regs + SD_EMMC_CLOCK;
	tx->phase_mask = CLK_TX_PHASE_MASK;
	tx->delay_mask = CLK_TX_DELAY_MASK(host);
	tx->delay_step_ps = CLK_DELAY_STEP_PS;
	tx->hw.init = &init;

	host->tx_clk = devm_clk_register(host->dev, &tx->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk)))
		return PTR_ERR(host->tx_clk);

	/* create the mmc rx clock */
	rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = 0;
	clk_parent[0] = __clk_get_name(host->mmc_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	rx->reg = host->regs + SD_EMMC_CLOCK;
	rx->phase_mask = CLK_RX_PHASE_MASK;
	rx->delay_mask = CLK_RX_DELAY_MASK(host);
	rx->delay_step_ps = CLK_DELAY_STEP_PS;
	rx->hw.init = &init;

	host->rx_clk = devm_clk_register(host->dev, &rx->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk)))
		return PTR_ERR(host->rx_clk);

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
	ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
	if (ret)
		return ret;

	/* default phases: core 180, tx/rx 0 - tuning may adjust rx later */
	clk_set_phase(host->mmc_clk, 180);
	clk_set_phase(host->tx_clk, 0);
	clk_set_phase(host->rx_clk, 0);

	return clk_prepare_enable(host->mmc_clk);
}

/* Rotate @map left by @shift bits, wrapping the dropped bits around */
static void meson_mmc_shift_map(unsigned long *map, unsigned long shift)
{
	DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM);
	DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM);

	/*
	 * shift the bitmap right and reintroduce the dropped bits on the left
	 * of the bitmap
	 */
	bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM);
	bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift,
			  CLK_PHASE_POINT_NUM);
	bitmap_or(map, left, right, CLK_PHASE_POINT_NUM);
}

/*
 * Starting the scan at *start, set *start to the beginning of the next
 * run of set bits and *stop to one past its end (CLK_PHASE_POINT_NUM if
 * no further run exists).
 */
static void meson_mmc_find_next_region(unsigned long *map,
				       unsigned long *start,
				       unsigned long *stop)
{
	*start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start);
	*stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start);
}
679d341ca88SJerome Brunet if (bitmap_full(test, CLK_PHASE_POINT_NUM)) 680d341ca88SJerome Brunet return 0; /* All points are good so point 0 will do */ 681d341ca88SJerome Brunet else if (bitmap_empty(test, CLK_PHASE_POINT_NUM)) 682d341ca88SJerome Brunet return -EIO; /* No successful tuning point */ 683d341ca88SJerome Brunet 684d341ca88SJerome Brunet /* 685d341ca88SJerome Brunet * Now we know there is a least one region find. Make sure it does 686d341ca88SJerome Brunet * not wrap by the shifting the bitmap if necessary 687d341ca88SJerome Brunet */ 688d341ca88SJerome Brunet shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM); 689d341ca88SJerome Brunet if (shift != 0) 690d341ca88SJerome Brunet meson_mmc_shift_map(test, shift); 691d341ca88SJerome Brunet 692d341ca88SJerome Brunet while (start < CLK_PHASE_POINT_NUM) { 693d341ca88SJerome Brunet meson_mmc_find_next_region(test, &start, &stop); 694d341ca88SJerome Brunet 695d341ca88SJerome Brunet if ((stop - start) > size) { 696d341ca88SJerome Brunet offset = start; 697d341ca88SJerome Brunet size = stop - start; 698d341ca88SJerome Brunet } 699d341ca88SJerome Brunet 700d341ca88SJerome Brunet start = stop; 701d341ca88SJerome Brunet } 702d341ca88SJerome Brunet 703d341ca88SJerome Brunet /* Get the center point of the region */ 704d341ca88SJerome Brunet offset += (size / 2); 705d341ca88SJerome Brunet 706d341ca88SJerome Brunet /* Shift the result back */ 707d341ca88SJerome Brunet offset = (offset + shift) % CLK_PHASE_POINT_NUM; 708d341ca88SJerome Brunet 709d341ca88SJerome Brunet return offset; 710d341ca88SJerome Brunet } 711d341ca88SJerome Brunet 712d341ca88SJerome Brunet static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, 713d341ca88SJerome Brunet struct clk *clk) 714d341ca88SJerome Brunet { 715d341ca88SJerome Brunet int point, ret; 716d341ca88SJerome Brunet DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM); 717d341ca88SJerome Brunet 718d341ca88SJerome Brunet dev_dbg(mmc_dev(mmc), "%s phase/delay tunning...\n", 
719d341ca88SJerome Brunet __clk_get_name(clk)); 720d341ca88SJerome Brunet bitmap_zero(test, CLK_PHASE_POINT_NUM); 721d341ca88SJerome Brunet 722d341ca88SJerome Brunet /* Explore tuning points */ 723d341ca88SJerome Brunet for (point = 0; point < CLK_PHASE_POINT_NUM; point++) { 724d341ca88SJerome Brunet clk_set_phase(clk, point * CLK_PHASE_STEP); 725d341ca88SJerome Brunet ret = mmc_send_tuning(mmc, opcode, NULL); 726d341ca88SJerome Brunet if (!ret) 727d341ca88SJerome Brunet set_bit(point, test); 728d341ca88SJerome Brunet } 729d341ca88SJerome Brunet 730d341ca88SJerome Brunet /* Find the optimal tuning point and apply it */ 731d341ca88SJerome Brunet point = meson_mmc_find_tuning_point(test); 732d341ca88SJerome Brunet if (point < 0) 733d341ca88SJerome Brunet return point; /* tuning failed */ 734d341ca88SJerome Brunet 735d341ca88SJerome Brunet clk_set_phase(clk, point * CLK_PHASE_STEP); 736d341ca88SJerome Brunet dev_dbg(mmc_dev(mmc), "success with phase: %d\n", 737d341ca88SJerome Brunet clk_get_phase(clk)); 738d341ca88SJerome Brunet return 0; 739d341ca88SJerome Brunet } 740d341ca88SJerome Brunet 741d341ca88SJerome Brunet static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 742d341ca88SJerome Brunet { 743d341ca88SJerome Brunet struct meson_host *host = mmc_priv(mmc); 74471e3e00cSAndreas Fenkart int adj = 0; 74571e3e00cSAndreas Fenkart 74671e3e00cSAndreas Fenkart /* enable signal resampling w/o delay */ 74771e3e00cSAndreas Fenkart adj = ADJUST_ADJ_EN; 74871e3e00cSAndreas Fenkart writel(adj, host->regs + host->data->adjust); 749d341ca88SJerome Brunet 750d341ca88SJerome Brunet return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); 751d341ca88SJerome Brunet } 752d341ca88SJerome Brunet 75351c5d844SKevin Hilman static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 75451c5d844SKevin Hilman { 75551c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 756c36cf125SJerome Brunet u32 bus_width, val; 757c36cf125SJerome Brunet int err; 
75851c5d844SKevin Hilman 75951c5d844SKevin Hilman /* 76051c5d844SKevin Hilman * GPIO regulator, only controls switching between 1v8 and 76151c5d844SKevin Hilman * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON. 76251c5d844SKevin Hilman */ 76351c5d844SKevin Hilman switch (ios->power_mode) { 76451c5d844SKevin Hilman case MMC_POWER_OFF: 76551c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vmmc)) 76651c5d844SKevin Hilman mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 76751c5d844SKevin Hilman 76851c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { 76951c5d844SKevin Hilman regulator_disable(mmc->supply.vqmmc); 77051c5d844SKevin Hilman host->vqmmc_enabled = false; 77151c5d844SKevin Hilman } 77251c5d844SKevin Hilman 77351c5d844SKevin Hilman break; 77451c5d844SKevin Hilman 77551c5d844SKevin Hilman case MMC_POWER_UP: 77651c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vmmc)) 77751c5d844SKevin Hilman mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 7783e2b0af4SJerome Brunet 77971e3e00cSAndreas Fenkart /* disable signal resampling */ 78071e3e00cSAndreas Fenkart writel(0, host->regs + host->data->adjust); 78171e3e00cSAndreas Fenkart 782fe0e5804SJerome Brunet /* Reset rx phase */ 7833e2b0af4SJerome Brunet clk_set_phase(host->rx_clk, 0); 7843e2b0af4SJerome Brunet 78551c5d844SKevin Hilman break; 78651c5d844SKevin Hilman 78751c5d844SKevin Hilman case MMC_POWER_ON: 78851c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { 78951c5d844SKevin Hilman int ret = regulator_enable(mmc->supply.vqmmc); 79051c5d844SKevin Hilman 79151c5d844SKevin Hilman if (ret < 0) 792c36cf125SJerome Brunet dev_err(host->dev, 79351c5d844SKevin Hilman "failed to enable vqmmc regulator\n"); 79451c5d844SKevin Hilman else 79551c5d844SKevin Hilman host->vqmmc_enabled = true; 79651c5d844SKevin Hilman } 79751c5d844SKevin Hilman 79851c5d844SKevin Hilman break; 79951c5d844SKevin Hilman } 80051c5d844SKevin Hilman 80151c5d844SKevin Hilman /* Bus width */ 
80251c5d844SKevin Hilman switch (ios->bus_width) { 80351c5d844SKevin Hilman case MMC_BUS_WIDTH_1: 80451c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_1; 80551c5d844SKevin Hilman break; 80651c5d844SKevin Hilman case MMC_BUS_WIDTH_4: 80751c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_4; 80851c5d844SKevin Hilman break; 80951c5d844SKevin Hilman case MMC_BUS_WIDTH_8: 81051c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_8; 81151c5d844SKevin Hilman break; 81251c5d844SKevin Hilman default: 81351c5d844SKevin Hilman dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n", 81451c5d844SKevin Hilman ios->bus_width); 81551c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_4; 81651c5d844SKevin Hilman } 81751c5d844SKevin Hilman 81851c5d844SKevin Hilman val = readl(host->regs + SD_EMMC_CFG); 8191231e7ebSHeiner Kallweit val &= ~CFG_BUS_WIDTH_MASK; 8201231e7ebSHeiner Kallweit val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width); 82151c5d844SKevin Hilman 822e21e6fddSHeiner Kallweit val &= ~CFG_DDR; 823844c8a75SJerome Brunet if (meson_mmc_timing_is_ddr(ios)) 824e21e6fddSHeiner Kallweit val |= CFG_DDR; 825e21e6fddSHeiner Kallweit 826844c8a75SJerome Brunet err = meson_mmc_clk_set(host, ios); 827c36cf125SJerome Brunet if (err) 828c36cf125SJerome Brunet dev_err(host->dev, "Failed to set clock: %d\n,", err); 829c36cf125SJerome Brunet 83051c5d844SKevin Hilman writel(val, host->regs + SD_EMMC_CFG); 831c36cf125SJerome Brunet dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val); 832c01d1219SHeiner Kallweit } 83351c5d844SKevin Hilman 8343d6c991bSHeiner Kallweit static void meson_mmc_request_done(struct mmc_host *mmc, 8353d6c991bSHeiner Kallweit struct mmc_request *mrq) 83651c5d844SKevin Hilman { 83751c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 83851c5d844SKevin Hilman 83951c5d844SKevin Hilman host->cmd = NULL; 84051c5d844SKevin Hilman mmc_request_done(host->mmc, mrq); 84151c5d844SKevin Hilman } 84251c5d844SKevin Hilman 8433d03f6a9SHeiner Kallweit static void meson_mmc_set_blksz(struct 
mmc_host *mmc, unsigned int blksz) 8443d03f6a9SHeiner Kallweit { 8453d03f6a9SHeiner Kallweit struct meson_host *host = mmc_priv(mmc); 8463d03f6a9SHeiner Kallweit u32 cfg, blksz_old; 8473d03f6a9SHeiner Kallweit 8483d03f6a9SHeiner Kallweit cfg = readl(host->regs + SD_EMMC_CFG); 8493d03f6a9SHeiner Kallweit blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg); 8503d03f6a9SHeiner Kallweit 8513d03f6a9SHeiner Kallweit if (!is_power_of_2(blksz)) 8523d03f6a9SHeiner Kallweit dev_err(host->dev, "blksz %u is not a power of 2\n", blksz); 8533d03f6a9SHeiner Kallweit 8543d03f6a9SHeiner Kallweit blksz = ilog2(blksz); 8553d03f6a9SHeiner Kallweit 8563d03f6a9SHeiner Kallweit /* check if block-size matches, if not update */ 8573d03f6a9SHeiner Kallweit if (blksz == blksz_old) 8583d03f6a9SHeiner Kallweit return; 8593d03f6a9SHeiner Kallweit 8603d03f6a9SHeiner Kallweit dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__, 8613d03f6a9SHeiner Kallweit blksz_old, blksz); 8623d03f6a9SHeiner Kallweit 8633d03f6a9SHeiner Kallweit cfg &= ~CFG_BLK_LEN_MASK; 8643d03f6a9SHeiner Kallweit cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz); 8653d03f6a9SHeiner Kallweit writel(cfg, host->regs + SD_EMMC_CFG); 8663d03f6a9SHeiner Kallweit } 8673d03f6a9SHeiner Kallweit 86875c7fd96SHeiner Kallweit static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg) 86975c7fd96SHeiner Kallweit { 87075c7fd96SHeiner Kallweit if (cmd->flags & MMC_RSP_PRESENT) { 87175c7fd96SHeiner Kallweit if (cmd->flags & MMC_RSP_136) 87275c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_RESP_128; 87375c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_RESP_NUM; 87475c7fd96SHeiner Kallweit 87575c7fd96SHeiner Kallweit if (!(cmd->flags & MMC_RSP_CRC)) 87675c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_RESP_NOCRC; 87775c7fd96SHeiner Kallweit 87875c7fd96SHeiner Kallweit if (cmd->flags & MMC_RSP_BUSY) 87975c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_R1B; 88075c7fd96SHeiner Kallweit } else { 88175c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_NO_RESP; 
88275c7fd96SHeiner Kallweit } 88375c7fd96SHeiner Kallweit } 88475c7fd96SHeiner Kallweit 88579ed05e3SHeiner Kallweit static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg) 88679ed05e3SHeiner Kallweit { 88779ed05e3SHeiner Kallweit struct meson_host *host = mmc_priv(mmc); 88879ed05e3SHeiner Kallweit struct sd_emmc_desc *desc = host->descs; 88979ed05e3SHeiner Kallweit struct mmc_data *data = host->cmd->data; 89079ed05e3SHeiner Kallweit struct scatterlist *sg; 89179ed05e3SHeiner Kallweit u32 start; 89279ed05e3SHeiner Kallweit int i; 89379ed05e3SHeiner Kallweit 89479ed05e3SHeiner Kallweit if (data->flags & MMC_DATA_WRITE) 89579ed05e3SHeiner Kallweit cmd_cfg |= CMD_CFG_DATA_WR; 89679ed05e3SHeiner Kallweit 89779ed05e3SHeiner Kallweit if (data->blocks > 1) { 89879ed05e3SHeiner Kallweit cmd_cfg |= CMD_CFG_BLOCK_MODE; 89979ed05e3SHeiner Kallweit meson_mmc_set_blksz(mmc, data->blksz); 90079ed05e3SHeiner Kallweit } 90179ed05e3SHeiner Kallweit 90279ed05e3SHeiner Kallweit for_each_sg(data->sg, sg, data->sg_count, i) { 90379ed05e3SHeiner Kallweit unsigned int len = sg_dma_len(sg); 90479ed05e3SHeiner Kallweit 90579ed05e3SHeiner Kallweit if (data->blocks > 1) 90679ed05e3SHeiner Kallweit len /= data->blksz; 90779ed05e3SHeiner Kallweit 90879ed05e3SHeiner Kallweit desc[i].cmd_cfg = cmd_cfg; 90979ed05e3SHeiner Kallweit desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len); 91079ed05e3SHeiner Kallweit if (i > 0) 91179ed05e3SHeiner Kallweit desc[i].cmd_cfg |= CMD_CFG_NO_CMD; 91279ed05e3SHeiner Kallweit desc[i].cmd_arg = host->cmd->arg; 91379ed05e3SHeiner Kallweit desc[i].cmd_resp = 0; 91479ed05e3SHeiner Kallweit desc[i].cmd_data = sg_dma_address(sg); 91579ed05e3SHeiner Kallweit } 91679ed05e3SHeiner Kallweit desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN; 91779ed05e3SHeiner Kallweit 91879ed05e3SHeiner Kallweit dma_wmb(); /* ensure descriptor is written before kicked */ 91979ed05e3SHeiner Kallweit start = host->descs_dma_addr | START_DESC_BUSY; 
92079ed05e3SHeiner Kallweit writel(start, host->regs + SD_EMMC_START); 92179ed05e3SHeiner Kallweit } 92279ed05e3SHeiner Kallweit 92351c5d844SKevin Hilman static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd) 92451c5d844SKevin Hilman { 92551c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 92600412ddcSHeiner Kallweit struct mmc_data *data = cmd->data; 9273d03f6a9SHeiner Kallweit u32 cmd_cfg = 0, cmd_data = 0; 92851c5d844SKevin Hilman unsigned int xfer_bytes = 0; 92951c5d844SKevin Hilman 93051c5d844SKevin Hilman /* Setup descriptors */ 93151c5d844SKevin Hilman dma_rmb(); 93251c5d844SKevin Hilman 93379ed05e3SHeiner Kallweit host->cmd = cmd; 93479ed05e3SHeiner Kallweit 9351231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode); 936a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */ 93718f92bc0SJerome Brunet cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */ 93851c5d844SKevin Hilman 93975c7fd96SHeiner Kallweit meson_mmc_set_response_bits(cmd, &cmd_cfg); 94051c5d844SKevin Hilman 94151c5d844SKevin Hilman /* data? 
*/ 94200412ddcSHeiner Kallweit if (data) { 94379ed05e3SHeiner Kallweit data->bytes_xfered = 0; 944a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_DATA_IO; 9451231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, 9464eee86c3SHeiner Kallweit ilog2(meson_mmc_get_timeout_msecs(data))); 947a744c6feSHeiner Kallweit 94879ed05e3SHeiner Kallweit if (meson_mmc_desc_chain_mode(data)) { 94979ed05e3SHeiner Kallweit meson_mmc_desc_chain_transfer(mmc, cmd_cfg); 95079ed05e3SHeiner Kallweit return; 95179ed05e3SHeiner Kallweit } 95279ed05e3SHeiner Kallweit 95300412ddcSHeiner Kallweit if (data->blocks > 1) { 954a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_BLOCK_MODE; 9551231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, 9561231e7ebSHeiner Kallweit data->blocks); 9573d03f6a9SHeiner Kallweit meson_mmc_set_blksz(mmc, data->blksz); 95851c5d844SKevin Hilman } else { 9591231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz); 96051c5d844SKevin Hilman } 96151c5d844SKevin Hilman 96200412ddcSHeiner Kallweit xfer_bytes = data->blksz * data->blocks; 96300412ddcSHeiner Kallweit if (data->flags & MMC_DATA_WRITE) { 964a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_DATA_WR; 96551c5d844SKevin Hilman WARN_ON(xfer_bytes > host->bounce_buf_size); 96600412ddcSHeiner Kallweit sg_copy_to_buffer(data->sg, data->sg_len, 96751c5d844SKevin Hilman host->bounce_buf, xfer_bytes); 96851c5d844SKevin Hilman dma_wmb(); 96951c5d844SKevin Hilman } 97051c5d844SKevin Hilman 971a322febeSHeiner Kallweit cmd_data = host->bounce_dma_addr & CMD_DATA_MASK; 97251c5d844SKevin Hilman } else { 9731231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, 9741231e7ebSHeiner Kallweit ilog2(SD_EMMC_CMD_TIMEOUT)); 97551c5d844SKevin Hilman } 97651c5d844SKevin Hilman 97751c5d844SKevin Hilman /* Last descriptor */ 978a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_END_OF_CHAIN; 979a322febeSHeiner Kallweit writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG); 980a322febeSHeiner Kallweit 
writel(cmd_data, host->regs + SD_EMMC_CMD_DAT); 981a322febeSHeiner Kallweit writel(0, host->regs + SD_EMMC_CMD_RSP); 98251c5d844SKevin Hilman wmb(); /* ensure descriptor is written before kicked */ 983a322febeSHeiner Kallweit writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG); 98451c5d844SKevin Hilman } 98551c5d844SKevin Hilman 98651c5d844SKevin Hilman static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 98751c5d844SKevin Hilman { 98851c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 98979ed05e3SHeiner Kallweit bool needs_pre_post_req = mrq->data && 99079ed05e3SHeiner Kallweit !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); 99179ed05e3SHeiner Kallweit 99279ed05e3SHeiner Kallweit if (needs_pre_post_req) { 99379ed05e3SHeiner Kallweit meson_mmc_get_transfer_mode(mmc, mrq); 99479ed05e3SHeiner Kallweit if (!meson_mmc_desc_chain_mode(mrq->data)) 99579ed05e3SHeiner Kallweit needs_pre_post_req = false; 99679ed05e3SHeiner Kallweit } 99779ed05e3SHeiner Kallweit 99879ed05e3SHeiner Kallweit if (needs_pre_post_req) 99979ed05e3SHeiner Kallweit meson_mmc_pre_req(mmc, mrq); 100051c5d844SKevin Hilman 100151c5d844SKevin Hilman /* Stop execution */ 100251c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_START); 100351c5d844SKevin Hilman 100479ed05e3SHeiner Kallweit meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); 100579ed05e3SHeiner Kallweit 100679ed05e3SHeiner Kallweit if (needs_pre_post_req) 100779ed05e3SHeiner Kallweit meson_mmc_post_req(mmc, mrq, 0); 100851c5d844SKevin Hilman } 100951c5d844SKevin Hilman 10103d6c991bSHeiner Kallweit static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) 101151c5d844SKevin Hilman { 101251c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 101351c5d844SKevin Hilman 101451c5d844SKevin Hilman if (cmd->flags & MMC_RSP_136) { 101551c5d844SKevin Hilman cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3); 101651c5d844SKevin Hilman cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2); 
101751c5d844SKevin Hilman cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1); 101851c5d844SKevin Hilman cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP); 101951c5d844SKevin Hilman } else if (cmd->flags & MMC_RSP_PRESENT) { 102051c5d844SKevin Hilman cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP); 102151c5d844SKevin Hilman } 102251c5d844SKevin Hilman } 102351c5d844SKevin Hilman 102451c5d844SKevin Hilman static irqreturn_t meson_mmc_irq(int irq, void *dev_id) 102551c5d844SKevin Hilman { 102651c5d844SKevin Hilman struct meson_host *host = dev_id; 102719a91dd4SHeinrich Schuchardt struct mmc_command *cmd; 10282c8d96a4SHeiner Kallweit struct mmc_data *data; 102951c5d844SKevin Hilman u32 irq_en, status, raw_status; 103074858655SJerome Brunet irqreturn_t ret = IRQ_NONE; 103151c5d844SKevin Hilman 103218f92bc0SJerome Brunet irq_en = readl(host->regs + SD_EMMC_IRQ_EN); 103318f92bc0SJerome Brunet raw_status = readl(host->regs + SD_EMMC_STATUS); 103418f92bc0SJerome Brunet status = raw_status & irq_en; 103518f92bc0SJerome Brunet 103618f92bc0SJerome Brunet if (!status) { 103718f92bc0SJerome Brunet dev_dbg(host->dev, 103818f92bc0SJerome Brunet "Unexpected IRQ! 
irq_en 0x%08x - status 0x%08x\n", 103918f92bc0SJerome Brunet irq_en, raw_status); 104018f92bc0SJerome Brunet return IRQ_NONE; 104118f92bc0SJerome Brunet } 104218f92bc0SJerome Brunet 104374858655SJerome Brunet if (WARN_ON(!host) || WARN_ON(!host->cmd)) 104451c5d844SKevin Hilman return IRQ_NONE; 104551c5d844SKevin Hilman 104674858655SJerome Brunet cmd = host->cmd; 104774858655SJerome Brunet data = cmd->data; 104874858655SJerome Brunet cmd->error = 0; 104974858655SJerome Brunet if (status & IRQ_CRC_ERR) { 105074858655SJerome Brunet dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status); 105174858655SJerome Brunet cmd->error = -EILSEQ; 105218f92bc0SJerome Brunet ret = IRQ_WAKE_THREAD; 105374858655SJerome Brunet goto out; 105474858655SJerome Brunet } 105574858655SJerome Brunet 105674858655SJerome Brunet if (status & IRQ_TIMEOUTS) { 105774858655SJerome Brunet dev_dbg(host->dev, "Timeout - status 0x%08x\n", status); 105874858655SJerome Brunet cmd->error = -ETIMEDOUT; 105918f92bc0SJerome Brunet ret = IRQ_WAKE_THREAD; 106051c5d844SKevin Hilman goto out; 106151c5d844SKevin Hilman } 106251c5d844SKevin Hilman 10631f8066d9SHeiner Kallweit meson_mmc_read_resp(host->mmc, cmd); 10641f8066d9SHeiner Kallweit 106574858655SJerome Brunet if (status & IRQ_SDIO) { 106674858655SJerome Brunet dev_dbg(host->dev, "IRQ: SDIO TODO.\n"); 106774858655SJerome Brunet ret = IRQ_HANDLED; 106851c5d844SKevin Hilman } 106951c5d844SKevin Hilman 10702c8d96a4SHeiner Kallweit if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { 10712c8d96a4SHeiner Kallweit if (data && !cmd->error) 10722c8d96a4SHeiner Kallweit data->bytes_xfered = data->blksz * data->blocks; 107379ed05e3SHeiner Kallweit if (meson_mmc_bounce_buf_read(data) || 107479ed05e3SHeiner Kallweit meson_mmc_get_next_command(cmd)) 107551c5d844SKevin Hilman ret = IRQ_WAKE_THREAD; 107674858655SJerome Brunet else 107774858655SJerome Brunet ret = IRQ_HANDLED; 107851c5d844SKevin Hilman } 107951c5d844SKevin Hilman 108051c5d844SKevin Hilman out: 
108118f92bc0SJerome Brunet if (cmd->error) { 108218f92bc0SJerome Brunet /* Stop desc in case of errors */ 108318f92bc0SJerome Brunet u32 start = readl(host->regs + SD_EMMC_START); 108418f92bc0SJerome Brunet 108518f92bc0SJerome Brunet start &= ~START_DESC_BUSY; 108618f92bc0SJerome Brunet writel(start, host->regs + SD_EMMC_START); 108718f92bc0SJerome Brunet } 108818f92bc0SJerome Brunet 10891f8066d9SHeiner Kallweit if (ret == IRQ_HANDLED) 109051c5d844SKevin Hilman meson_mmc_request_done(host->mmc, cmd->mrq); 109151c5d844SKevin Hilman 10929c5fdb07SJerome Brunet /* ack all raised interrupts */ 10939c5fdb07SJerome Brunet writel(status, host->regs + SD_EMMC_STATUS); 10949c5fdb07SJerome Brunet 109551c5d844SKevin Hilman return ret; 109651c5d844SKevin Hilman } 109751c5d844SKevin Hilman 109818f92bc0SJerome Brunet static int meson_mmc_wait_desc_stop(struct meson_host *host) 109918f92bc0SJerome Brunet { 110018f92bc0SJerome Brunet u32 status; 110118f92bc0SJerome Brunet 110218f92bc0SJerome Brunet /* 110318f92bc0SJerome Brunet * It may sometimes take a while for it to actually halt. Here, we 110418f92bc0SJerome Brunet * are giving it 5ms to comply 110518f92bc0SJerome Brunet * 110618f92bc0SJerome Brunet * If we don't confirm the descriptor is stopped, it might raise new 110718f92bc0SJerome Brunet * IRQs after we have called mmc_request_done() which is bad. 
110818f92bc0SJerome Brunet */ 110918f92bc0SJerome Brunet 111098849da6SJerome Brunet return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status, 111198849da6SJerome Brunet !(status & (STATUS_BUSY | STATUS_DESC_BUSY)), 111298849da6SJerome Brunet 100, 5000); 111318f92bc0SJerome Brunet } 111418f92bc0SJerome Brunet 111551c5d844SKevin Hilman static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) 111651c5d844SKevin Hilman { 111751c5d844SKevin Hilman struct meson_host *host = dev_id; 1118e5e4a3ebSHeiner Kallweit struct mmc_command *next_cmd, *cmd = host->cmd; 111951c5d844SKevin Hilman struct mmc_data *data; 112051c5d844SKevin Hilman unsigned int xfer_bytes; 112151c5d844SKevin Hilman 112251c5d844SKevin Hilman if (WARN_ON(!cmd)) 112319a91dd4SHeinrich Schuchardt return IRQ_NONE; 112451c5d844SKevin Hilman 112518f92bc0SJerome Brunet if (cmd->error) { 112618f92bc0SJerome Brunet meson_mmc_wait_desc_stop(host); 112718f92bc0SJerome Brunet meson_mmc_request_done(host->mmc, cmd->mrq); 112818f92bc0SJerome Brunet 112918f92bc0SJerome Brunet return IRQ_HANDLED; 113018f92bc0SJerome Brunet } 113118f92bc0SJerome Brunet 113251c5d844SKevin Hilman data = cmd->data; 113379ed05e3SHeiner Kallweit if (meson_mmc_bounce_buf_read(data)) { 113451c5d844SKevin Hilman xfer_bytes = data->blksz * data->blocks; 113551c5d844SKevin Hilman WARN_ON(xfer_bytes > host->bounce_buf_size); 113651c5d844SKevin Hilman sg_copy_from_buffer(data->sg, data->sg_len, 113751c5d844SKevin Hilman host->bounce_buf, xfer_bytes); 113851c5d844SKevin Hilman } 113951c5d844SKevin Hilman 1140e5e4a3ebSHeiner Kallweit next_cmd = meson_mmc_get_next_command(cmd); 1141e5e4a3ebSHeiner Kallweit if (next_cmd) 1142e5e4a3ebSHeiner Kallweit meson_mmc_start_cmd(host->mmc, next_cmd); 114351c5d844SKevin Hilman else 1144e5e4a3ebSHeiner Kallweit meson_mmc_request_done(host->mmc, cmd->mrq); 114551c5d844SKevin Hilman 1146690f90b6SHeiner Kallweit return IRQ_HANDLED; 114751c5d844SKevin Hilman } 114851c5d844SKevin Hilman 114951c5d844SKevin Hilman 
/* 115051c5d844SKevin Hilman * NOTE: we only need this until the GPIO/pinctrl driver can handle 115151c5d844SKevin Hilman * interrupts. For now, the MMC core will use this for polling. 115251c5d844SKevin Hilman */ 115351c5d844SKevin Hilman static int meson_mmc_get_cd(struct mmc_host *mmc) 115451c5d844SKevin Hilman { 115551c5d844SKevin Hilman int status = mmc_gpio_get_cd(mmc); 115651c5d844SKevin Hilman 115751c5d844SKevin Hilman if (status == -ENOSYS) 115851c5d844SKevin Hilman return 1; /* assume present */ 115951c5d844SKevin Hilman 116051c5d844SKevin Hilman return status; 116151c5d844SKevin Hilman } 116251c5d844SKevin Hilman 1163c01d1219SHeiner Kallweit static void meson_mmc_cfg_init(struct meson_host *host) 1164c01d1219SHeiner Kallweit { 116571e3e00cSAndreas Fenkart u32 cfg = 0; 1166c01d1219SHeiner Kallweit 11671231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, 11681231e7ebSHeiner Kallweit ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); 11691231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP)); 11701231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE)); 1171c01d1219SHeiner Kallweit 117218f92bc0SJerome Brunet /* abort chain on R/W errors */ 117318f92bc0SJerome Brunet cfg |= CFG_ERR_ABORT; 117418f92bc0SJerome Brunet 1175c01d1219SHeiner Kallweit writel(cfg, host->regs + SD_EMMC_CFG); 1176c01d1219SHeiner Kallweit } 1177c01d1219SHeiner Kallweit 1178186cd8b7SJerome Brunet static int meson_mmc_card_busy(struct mmc_host *mmc) 1179186cd8b7SJerome Brunet { 1180186cd8b7SJerome Brunet struct meson_host *host = mmc_priv(mmc); 1181186cd8b7SJerome Brunet u32 regval; 1182186cd8b7SJerome Brunet 1183186cd8b7SJerome Brunet regval = readl(host->regs + SD_EMMC_STATUS); 1184186cd8b7SJerome Brunet 1185186cd8b7SJerome Brunet /* We are only interrested in lines 0 to 3, so mask the other ones */ 1186186cd8b7SJerome Brunet return !(FIELD_GET(STATUS_DATI, regval) & 0xf); 1187186cd8b7SJerome Brunet } 1188186cd8b7SJerome 
Brunet 1189b1231b2fSJerome Brunet static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) 1190b1231b2fSJerome Brunet { 1191b1231b2fSJerome Brunet /* vqmmc regulator is available */ 1192b1231b2fSJerome Brunet if (!IS_ERR(mmc->supply.vqmmc)) { 1193b1231b2fSJerome Brunet /* 1194b1231b2fSJerome Brunet * The usual amlogic setup uses a GPIO to switch from one 1195b1231b2fSJerome Brunet * regulator to the other. While the voltage ramp up is 1196b1231b2fSJerome Brunet * pretty fast, care must be taken when switching from 3.3v 1197b1231b2fSJerome Brunet * to 1.8v. Please make sure the regulator framework is aware 1198b1231b2fSJerome Brunet * of your own regulator constraints 1199b1231b2fSJerome Brunet */ 1200b1231b2fSJerome Brunet return mmc_regulator_set_vqmmc(mmc, ios); 1201b1231b2fSJerome Brunet } 1202b1231b2fSJerome Brunet 1203b1231b2fSJerome Brunet /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ 1204b1231b2fSJerome Brunet if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1205b1231b2fSJerome Brunet return 0; 1206b1231b2fSJerome Brunet 1207b1231b2fSJerome Brunet return -EINVAL; 1208b1231b2fSJerome Brunet } 1209b1231b2fSJerome Brunet 121051c5d844SKevin Hilman static const struct mmc_host_ops meson_mmc_ops = { 121151c5d844SKevin Hilman .request = meson_mmc_request, 121251c5d844SKevin Hilman .set_ios = meson_mmc_set_ios, 121351c5d844SKevin Hilman .get_cd = meson_mmc_get_cd, 121479ed05e3SHeiner Kallweit .pre_req = meson_mmc_pre_req, 121579ed05e3SHeiner Kallweit .post_req = meson_mmc_post_req, 12160b6ed71cSHeiner Kallweit .execute_tuning = meson_mmc_execute_tuning, 1217186cd8b7SJerome Brunet .card_busy = meson_mmc_card_busy, 1218b1231b2fSJerome Brunet .start_signal_voltage_switch = meson_mmc_voltage_switch, 121951c5d844SKevin Hilman }; 122051c5d844SKevin Hilman 122151c5d844SKevin Hilman static int meson_mmc_probe(struct platform_device *pdev) 122251c5d844SKevin Hilman { 122351c5d844SKevin Hilman struct resource *res; 122451c5d844SKevin 
Hilman struct meson_host *host; 122551c5d844SKevin Hilman struct mmc_host *mmc; 1226bb364890SRemi Pommarel int ret; 122751c5d844SKevin Hilman 122851c5d844SKevin Hilman mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); 122951c5d844SKevin Hilman if (!mmc) 123051c5d844SKevin Hilman return -ENOMEM; 123151c5d844SKevin Hilman host = mmc_priv(mmc); 123251c5d844SKevin Hilman host->mmc = mmc; 123351c5d844SKevin Hilman host->dev = &pdev->dev; 123451c5d844SKevin Hilman dev_set_drvdata(&pdev->dev, host); 123551c5d844SKevin Hilman 123651c5d844SKevin Hilman /* Get regulators and the supported OCR mask */ 123751c5d844SKevin Hilman host->vqmmc_enabled = false; 123851c5d844SKevin Hilman ret = mmc_regulator_get_supply(mmc); 1239fa54f3e3SWolfram Sang if (ret) 124051c5d844SKevin Hilman goto free_host; 124151c5d844SKevin Hilman 124251c5d844SKevin Hilman ret = mmc_of_parse(mmc); 124351c5d844SKevin Hilman if (ret) { 1244dc012058SKevin Hilman if (ret != -EPROBE_DEFER) 124551c5d844SKevin Hilman dev_warn(&pdev->dev, "error parsing DT: %d\n", ret); 124651c5d844SKevin Hilman goto free_host; 124751c5d844SKevin Hilman } 124851c5d844SKevin Hilman 1249df069815SNan Li host->data = (struct meson_mmc_data *) 1250df069815SNan Li of_device_get_match_data(&pdev->dev); 1251df069815SNan Li if (!host->data) { 1252df069815SNan Li ret = -EINVAL; 1253df069815SNan Li goto free_host; 1254df069815SNan Li } 1255df069815SNan Li 125619c6beaaSJerome Brunet ret = device_reset_optional(&pdev->dev); 125719c6beaaSJerome Brunet if (ret) { 125819c6beaaSJerome Brunet if (ret != -EPROBE_DEFER) 125919c6beaaSJerome Brunet dev_err(&pdev->dev, "device reset failed: %d\n", ret); 126019c6beaaSJerome Brunet 126119c6beaaSJerome Brunet return ret; 126219c6beaaSJerome Brunet } 126319c6beaaSJerome Brunet 126451c5d844SKevin Hilman res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 126551c5d844SKevin Hilman host->regs = devm_ioremap_resource(&pdev->dev, res); 126651c5d844SKevin Hilman if (IS_ERR(host->regs)) { 
126751c5d844SKevin Hilman ret = PTR_ERR(host->regs); 126851c5d844SKevin Hilman goto free_host; 126951c5d844SKevin Hilman } 127051c5d844SKevin Hilman 1271bb364890SRemi Pommarel host->irq = platform_get_irq(pdev, 0); 1272bb364890SRemi Pommarel if (host->irq <= 0) { 127351c5d844SKevin Hilman dev_err(&pdev->dev, "failed to get interrupt resource.\n"); 127451c5d844SKevin Hilman ret = -EINVAL; 127551c5d844SKevin Hilman goto free_host; 127651c5d844SKevin Hilman } 127751c5d844SKevin Hilman 12781e03331dSJerome Brunet host->pinctrl = devm_pinctrl_get(&pdev->dev); 12791e03331dSJerome Brunet if (IS_ERR(host->pinctrl)) { 12801e03331dSJerome Brunet ret = PTR_ERR(host->pinctrl); 12811e03331dSJerome Brunet goto free_host; 12821e03331dSJerome Brunet } 12831e03331dSJerome Brunet 12841e03331dSJerome Brunet host->pins_default = pinctrl_lookup_state(host->pinctrl, 12851e03331dSJerome Brunet PINCTRL_STATE_DEFAULT); 12861e03331dSJerome Brunet if (IS_ERR(host->pins_default)) { 12871e03331dSJerome Brunet ret = PTR_ERR(host->pins_default); 12881e03331dSJerome Brunet goto free_host; 12891e03331dSJerome Brunet } 12901e03331dSJerome Brunet 12911e03331dSJerome Brunet host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, 12921e03331dSJerome Brunet "clk-gate"); 12931e03331dSJerome Brunet if (IS_ERR(host->pins_clk_gate)) { 12941e03331dSJerome Brunet dev_warn(&pdev->dev, 12951e03331dSJerome Brunet "can't get clk-gate pinctrl, using clk_stop bit\n"); 12961e03331dSJerome Brunet host->pins_clk_gate = NULL; 12971e03331dSJerome Brunet } 12981e03331dSJerome Brunet 129951c5d844SKevin Hilman host->core_clk = devm_clk_get(&pdev->dev, "core"); 130051c5d844SKevin Hilman if (IS_ERR(host->core_clk)) { 130151c5d844SKevin Hilman ret = PTR_ERR(host->core_clk); 130251c5d844SKevin Hilman goto free_host; 130351c5d844SKevin Hilman } 130451c5d844SKevin Hilman 130551c5d844SKevin Hilman ret = clk_prepare_enable(host->core_clk); 130651c5d844SKevin Hilman if (ret) 130751c5d844SKevin Hilman goto free_host; 
130851c5d844SKevin Hilman 130951c5d844SKevin Hilman ret = meson_mmc_clk_init(host); 131051c5d844SKevin Hilman if (ret) 1311ce473d5bSMichał Zegan goto err_core_clk; 131251c5d844SKevin Hilman 13133c39e2caSJerome Brunet /* set config to sane default */ 13143c39e2caSJerome Brunet meson_mmc_cfg_init(host); 13153c39e2caSJerome Brunet 131651c5d844SKevin Hilman /* Stop execution */ 131751c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_START); 131851c5d844SKevin Hilman 131974858655SJerome Brunet /* clear, ack and enable interrupts */ 132051c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_IRQ_EN); 132174858655SJerome Brunet writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, 132274858655SJerome Brunet host->regs + SD_EMMC_STATUS); 132374858655SJerome Brunet writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, 132474858655SJerome Brunet host->regs + SD_EMMC_IRQ_EN); 132551c5d844SKevin Hilman 1326bb364890SRemi Pommarel ret = request_threaded_irq(host->irq, meson_mmc_irq, 1327eb4d8112SJerome Brunet meson_mmc_irq_thread, IRQF_ONESHOT, 132883e418a8SMartin Blumenstingl dev_name(&pdev->dev), host); 132951c5d844SKevin Hilman if (ret) 1330bd911ec4SJerome Brunet goto err_init_clk; 133151c5d844SKevin Hilman 1332e5e4a3ebSHeiner Kallweit mmc->caps |= MMC_CAP_CMD23; 1333efe0b669SHeiner Kallweit mmc->max_blk_count = CMD_CFG_LENGTH_MASK; 1334efe0b669SHeiner Kallweit mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size; 133579ed05e3SHeiner Kallweit mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc); 133679ed05e3SHeiner Kallweit mmc->max_seg_size = mmc->max_req_size; 1337efe0b669SHeiner Kallweit 1338d5f758f2SJerome Brunet /* 1339d5f758f2SJerome Brunet * At the moment, we don't know how to reliably enable HS400. 
1340d5f758f2SJerome Brunet * From the different datasheets, it is not even clear if this mode 1341d5f758f2SJerome Brunet * is officially supported by any of the SoCs 1342d5f758f2SJerome Brunet */ 1343d5f758f2SJerome Brunet mmc->caps2 &= ~MMC_CAP2_HS400; 1344d5f758f2SJerome Brunet 134551c5d844SKevin Hilman /* data bounce buffer */ 13464136fcb5SHeiner Kallweit host->bounce_buf_size = mmc->max_req_size; 134751c5d844SKevin Hilman host->bounce_buf = 134851c5d844SKevin Hilman dma_alloc_coherent(host->dev, host->bounce_buf_size, 134951c5d844SKevin Hilman &host->bounce_dma_addr, GFP_KERNEL); 135051c5d844SKevin Hilman if (host->bounce_buf == NULL) { 135151c5d844SKevin Hilman dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); 135251c5d844SKevin Hilman ret = -ENOMEM; 1353bb364890SRemi Pommarel goto err_free_irq; 135451c5d844SKevin Hilman } 135551c5d844SKevin Hilman 135679ed05e3SHeiner Kallweit host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 135779ed05e3SHeiner Kallweit &host->descs_dma_addr, GFP_KERNEL); 135879ed05e3SHeiner Kallweit if (!host->descs) { 135979ed05e3SHeiner Kallweit dev_err(host->dev, "Allocating descriptor DMA buffer failed\n"); 136079ed05e3SHeiner Kallweit ret = -ENOMEM; 136179ed05e3SHeiner Kallweit goto err_bounce_buf; 136279ed05e3SHeiner Kallweit } 136379ed05e3SHeiner Kallweit 136451c5d844SKevin Hilman mmc->ops = &meson_mmc_ops; 136551c5d844SKevin Hilman mmc_add_host(mmc); 136651c5d844SKevin Hilman 136751c5d844SKevin Hilman return 0; 136851c5d844SKevin Hilman 136979ed05e3SHeiner Kallweit err_bounce_buf: 137079ed05e3SHeiner Kallweit dma_free_coherent(host->dev, host->bounce_buf_size, 137179ed05e3SHeiner Kallweit host->bounce_buf, host->bounce_dma_addr); 1372bb364890SRemi Pommarel err_free_irq: 1373bb364890SRemi Pommarel free_irq(host->irq, host); 1374bd911ec4SJerome Brunet err_init_clk: 1375bd911ec4SJerome Brunet clk_disable_unprepare(host->mmc_clk); 1376ce473d5bSMichał Zegan err_core_clk: 137751c5d844SKevin Hilman 
clk_disable_unprepare(host->core_clk); 1378ce473d5bSMichał Zegan free_host: 137951c5d844SKevin Hilman mmc_free_host(mmc); 138051c5d844SKevin Hilman return ret; 138151c5d844SKevin Hilman } 138251c5d844SKevin Hilman 138351c5d844SKevin Hilman static int meson_mmc_remove(struct platform_device *pdev) 138451c5d844SKevin Hilman { 138551c5d844SKevin Hilman struct meson_host *host = dev_get_drvdata(&pdev->dev); 138651c5d844SKevin Hilman 1387a01fc2a2SMichał Zegan mmc_remove_host(host->mmc); 1388a01fc2a2SMichał Zegan 138992763b99SHeiner Kallweit /* disable interrupts */ 139092763b99SHeiner Kallweit writel(0, host->regs + SD_EMMC_IRQ_EN); 1391bb364890SRemi Pommarel free_irq(host->irq, host); 139292763b99SHeiner Kallweit 139379ed05e3SHeiner Kallweit dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 139479ed05e3SHeiner Kallweit host->descs, host->descs_dma_addr); 139551c5d844SKevin Hilman dma_free_coherent(host->dev, host->bounce_buf_size, 139651c5d844SKevin Hilman host->bounce_buf, host->bounce_dma_addr); 139751c5d844SKevin Hilman 1398bd911ec4SJerome Brunet clk_disable_unprepare(host->mmc_clk); 139951c5d844SKevin Hilman clk_disable_unprepare(host->core_clk); 140051c5d844SKevin Hilman 140151c5d844SKevin Hilman mmc_free_host(host->mmc); 140251c5d844SKevin Hilman return 0; 140351c5d844SKevin Hilman } 140451c5d844SKevin Hilman 1405df069815SNan Li static const struct meson_mmc_data meson_gx_data = { 1406df069815SNan Li .tx_delay_mask = CLK_V2_TX_DELAY_MASK, 1407df069815SNan Li .rx_delay_mask = CLK_V2_RX_DELAY_MASK, 1408df069815SNan Li .always_on = CLK_V2_ALWAYS_ON, 140971645e65SJerome Brunet .adjust = SD_EMMC_ADJUST, 1410df069815SNan Li }; 1411df069815SNan Li 1412df069815SNan Li static const struct meson_mmc_data meson_axg_data = { 1413df069815SNan Li .tx_delay_mask = CLK_V3_TX_DELAY_MASK, 1414df069815SNan Li .rx_delay_mask = CLK_V3_RX_DELAY_MASK, 1415df069815SNan Li .always_on = CLK_V3_ALWAYS_ON, 141671645e65SJerome Brunet .adjust = SD_EMMC_V3_ADJUST, 1417df069815SNan Li }; 
1418df069815SNan Li 141951c5d844SKevin Hilman static const struct of_device_id meson_mmc_of_match[] = { 1420df069815SNan Li { .compatible = "amlogic,meson-gx-mmc", .data = &meson_gx_data }, 1421df069815SNan Li { .compatible = "amlogic,meson-gxbb-mmc", .data = &meson_gx_data }, 1422df069815SNan Li { .compatible = "amlogic,meson-gxl-mmc", .data = &meson_gx_data }, 1423df069815SNan Li { .compatible = "amlogic,meson-gxm-mmc", .data = &meson_gx_data }, 1424df069815SNan Li { .compatible = "amlogic,meson-axg-mmc", .data = &meson_axg_data }, 142551c5d844SKevin Hilman {} 142651c5d844SKevin Hilman }; 142751c5d844SKevin Hilman MODULE_DEVICE_TABLE(of, meson_mmc_of_match); 142851c5d844SKevin Hilman 142951c5d844SKevin Hilman static struct platform_driver meson_mmc_driver = { 143051c5d844SKevin Hilman .probe = meson_mmc_probe, 143151c5d844SKevin Hilman .remove = meson_mmc_remove, 143251c5d844SKevin Hilman .driver = { 143351c5d844SKevin Hilman .name = DRIVER_NAME, 143451c5d844SKevin Hilman .of_match_table = of_match_ptr(meson_mmc_of_match), 143551c5d844SKevin Hilman }, 143651c5d844SKevin Hilman }; 143751c5d844SKevin Hilman 143851c5d844SKevin Hilman module_platform_driver(meson_mmc_driver); 143951c5d844SKevin Hilman 1440e79dc1b4SNan Li MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver"); 144151c5d844SKevin Hilman MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>"); 144251c5d844SKevin Hilman MODULE_LICENSE("GPL v2"); 1443