151c5d844SKevin Hilman /* 251c5d844SKevin Hilman * Amlogic SD/eMMC driver for the GX/S905 family SoCs 351c5d844SKevin Hilman * 451c5d844SKevin Hilman * Copyright (c) 2016 BayLibre, SAS. 551c5d844SKevin Hilman * Author: Kevin Hilman <khilman@baylibre.com> 651c5d844SKevin Hilman * 751c5d844SKevin Hilman * This program is free software; you can redistribute it and/or modify 851c5d844SKevin Hilman * it under the terms of version 2 of the GNU General Public License as 951c5d844SKevin Hilman * published by the Free Software Foundation. 1051c5d844SKevin Hilman * 1151c5d844SKevin Hilman * This program is distributed in the hope that it will be useful, but 1251c5d844SKevin Hilman * WITHOUT ANY WARRANTY; without even the implied warranty of 1351c5d844SKevin Hilman * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1451c5d844SKevin Hilman * General Public License for more details. 1551c5d844SKevin Hilman * 1651c5d844SKevin Hilman * You should have received a copy of the GNU General Public License 1751c5d844SKevin Hilman * along with this program; if not, see <http://www.gnu.org/licenses/>. 1851c5d844SKevin Hilman * The full GNU General Public License is included in this distribution 1951c5d844SKevin Hilman * in the file called COPYING. 2051c5d844SKevin Hilman */ 2151c5d844SKevin Hilman #include <linux/kernel.h> 2251c5d844SKevin Hilman #include <linux/module.h> 2351c5d844SKevin Hilman #include <linux/init.h> 2418f92bc0SJerome Brunet #include <linux/delay.h> 2551c5d844SKevin Hilman #include <linux/device.h> 2698849da6SJerome Brunet #include <linux/iopoll.h> 2751c5d844SKevin Hilman #include <linux/of_device.h> 2851c5d844SKevin Hilman #include <linux/platform_device.h> 2951c5d844SKevin Hilman #include <linux/ioport.h> 3051c5d844SKevin Hilman #include <linux/dma-mapping.h> 3151c5d844SKevin Hilman #include <linux/mmc/host.h> 3251c5d844SKevin Hilman #include <linux/mmc/mmc.h> 3351c5d844SKevin Hilman #include <linux/mmc/sdio.h> 3451c5d844SKevin Hilman #include <linux/mmc/slot-gpio.h> 3551c5d844SKevin Hilman #include <linux/io.h> 3651c5d844SKevin Hilman #include <linux/clk.h> 3751c5d844SKevin Hilman #include <linux/clk-provider.h> 3851c5d844SKevin Hilman #include <linux/regulator/consumer.h> 3919c6beaaSJerome Brunet #include <linux/reset.h> 40b8789ec4SUlf Hansson #include <linux/interrupt.h> 411231e7ebSHeiner Kallweit #include <linux/bitfield.h> 428fb572acSThierry Reding #include <linux/pinctrl/consumer.h> 4351c5d844SKevin Hilman 4451c5d844SKevin Hilman #define DRIVER_NAME "meson-gx-mmc" 4551c5d844SKevin Hilman 4651c5d844SKevin Hilman #define SD_EMMC_CLOCK 0x0 471231e7ebSHeiner Kallweit #define CLK_DIV_MASK GENMASK(5, 0) 481231e7ebSHeiner Kallweit #define CLK_SRC_MASK GENMASK(7, 6) 491231e7ebSHeiner Kallweit #define CLK_CORE_PHASE_MASK GENMASK(9, 8) 50c08bcb6cSHeiner Kallweit #define CLK_TX_PHASE_MASK GENMASK(11, 10) 51c08bcb6cSHeiner Kallweit #define CLK_RX_PHASE_MASK GENMASK(13, 12) 525e6f75f4SJerome Brunet #define CLK_PHASE_0 0 535e6f75f4SJerome Brunet #define CLK_PHASE_180 2 54df069815SNan Li #define CLK_V2_TX_DELAY_MASK GENMASK(19, 16) 55df069815SNan Li #define CLK_V2_RX_DELAY_MASK GENMASK(23, 20) 56df069815SNan Li #define CLK_V2_ALWAYS_ON BIT(24) 57df069815SNan Li 58df069815SNan Li #define CLK_V3_TX_DELAY_MASK GENMASK(21, 16) 59df069815SNan Li #define CLK_V3_RX_DELAY_MASK GENMASK(27, 22) 60df069815SNan Li #define CLK_V3_ALWAYS_ON BIT(28) 61df069815SNan Li 62df069815SNan Li #define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask) 63df069815SNan Li #define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask) 64df069815SNan Li #define CLK_ALWAYS_ON(h) (h->data->always_on) 6551c5d844SKevin Hilman 6652899b99SJerome Brunet #define SD_EMMC_DELAY 0x4 6751c5d844SKevin Hilman #define SD_EMMC_ADJUST 0x8 6871645e65SJerome Brunet #define ADJUST_ADJ_DELAY_MASK GENMASK(21, 16) 6971645e65SJerome Brunet #define ADJUST_DS_EN BIT(15) 7071645e65SJerome Brunet #define ADJUST_ADJ_EN BIT(13) 71df069815SNan Li 72df069815SNan Li #define SD_EMMC_DELAY1 0x4 73df069815SNan Li #define SD_EMMC_DELAY2 0x8 74df069815SNan Li #define SD_EMMC_V3_ADJUST 0xc 75df069815SNan Li 7651c5d844SKevin Hilman #define SD_EMMC_CALOUT 0x10 7751c5d844SKevin Hilman #define SD_EMMC_START 0x40 7851c5d844SKevin Hilman #define START_DESC_INIT BIT(0) 7951c5d844SKevin Hilman #define START_DESC_BUSY BIT(1) 801231e7ebSHeiner Kallweit #define START_DESC_ADDR_MASK GENMASK(31, 2) 8151c5d844SKevin Hilman 8251c5d844SKevin Hilman #define SD_EMMC_CFG 0x44 831231e7ebSHeiner Kallweit #define CFG_BUS_WIDTH_MASK GENMASK(1, 0) 8451c5d844SKevin Hilman #define CFG_BUS_WIDTH_1 0x0 8551c5d844SKevin Hilman #define CFG_BUS_WIDTH_4 0x1 8651c5d844SKevin Hilman #define CFG_BUS_WIDTH_8 0x2 8751c5d844SKevin Hilman #define CFG_DDR BIT(2) 881231e7ebSHeiner Kallweit #define CFG_BLK_LEN_MASK GENMASK(7, 4) 891231e7ebSHeiner Kallweit #define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8) 901231e7ebSHeiner Kallweit #define CFG_RC_CC_MASK GENMASK(15, 12) 9151c5d844SKevin Hilman #define CFG_STOP_CLOCK BIT(22) 9251c5d844SKevin Hilman #define CFG_CLK_ALWAYS_ON BIT(18) 93e21e6fddSHeiner Kallweit #define CFG_CHK_DS BIT(20) 9451c5d844SKevin Hilman #define CFG_AUTO_CLK BIT(23) 9518f92bc0SJerome Brunet #define CFG_ERR_ABORT BIT(27) 9651c5d844SKevin Hilman 9751c5d844SKevin Hilman #define SD_EMMC_STATUS 0x48 9851c5d844SKevin Hilman #define STATUS_BUSY BIT(31) 9918f92bc0SJerome Brunet #define STATUS_DESC_BUSY BIT(30) 100186cd8b7SJerome Brunet #define STATUS_DATI GENMASK(23, 16) 10151c5d844SKevin Hilman 10251c5d844SKevin Hilman #define SD_EMMC_IRQ_EN 0x4c 1031231e7ebSHeiner Kallweit #define IRQ_RXD_ERR_MASK GENMASK(7, 0) 10451c5d844SKevin Hilman #define IRQ_TXD_ERR BIT(8) 10551c5d844SKevin Hilman #define IRQ_DESC_ERR BIT(9) 10651c5d844SKevin Hilman #define IRQ_RESP_ERR BIT(10) 10774858655SJerome Brunet #define IRQ_CRC_ERR \ 10874858655SJerome Brunet (IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR) 10951c5d844SKevin Hilman #define IRQ_RESP_TIMEOUT BIT(11) 11051c5d844SKevin Hilman #define IRQ_DESC_TIMEOUT BIT(12) 11174858655SJerome Brunet #define IRQ_TIMEOUTS \ 11274858655SJerome Brunet (IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT) 11351c5d844SKevin Hilman #define IRQ_END_OF_CHAIN BIT(13) 11451c5d844SKevin Hilman #define IRQ_RESP_STATUS BIT(14) 11551c5d844SKevin Hilman #define IRQ_SDIO BIT(15) 11674858655SJerome Brunet #define IRQ_EN_MASK \ 11774858655SJerome Brunet (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\ 11874858655SJerome Brunet IRQ_SDIO) 11951c5d844SKevin Hilman 12051c5d844SKevin Hilman #define SD_EMMC_CMD_CFG 0x50 12151c5d844SKevin Hilman #define SD_EMMC_CMD_ARG 0x54 12251c5d844SKevin Hilman #define SD_EMMC_CMD_DAT 0x58 12351c5d844SKevin Hilman #define SD_EMMC_CMD_RSP 0x5c 12451c5d844SKevin Hilman #define SD_EMMC_CMD_RSP1 0x60 12551c5d844SKevin Hilman #define SD_EMMC_CMD_RSP2 0x64 12651c5d844SKevin Hilman #define SD_EMMC_CMD_RSP3 0x68 12751c5d844SKevin Hilman 12851c5d844SKevin Hilman #define SD_EMMC_RXD 0x94 12951c5d844SKevin Hilman #define SD_EMMC_TXD 0x94 13051c5d844SKevin Hilman #define SD_EMMC_LAST_REG SD_EMMC_TXD 13151c5d844SKevin Hilman 13251c5d844SKevin Hilman #define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */ 13351c5d844SKevin Hilman #define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */ 134bb11eff1SHeiner Kallweit #define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */ 135bb11eff1SHeiner Kallweit #define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */ 13651c5d844SKevin Hilman #define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */ 13779ed05e3SHeiner Kallweit #define SD_EMMC_DESC_BUF_LEN PAGE_SIZE 13879ed05e3SHeiner Kallweit 13979ed05e3SHeiner Kallweit #define SD_EMMC_PRE_REQ_DONE BIT(0) 14079ed05e3SHeiner Kallweit #define SD_EMMC_DESC_CHAIN_MODE BIT(1) 14179ed05e3SHeiner Kallweit 14251c5d844SKevin Hilman #define MUX_CLK_NUM_PARENTS 2 14351c5d844SKevin Hilman 144df069815SNan Li struct meson_mmc_data { 145df069815SNan Li unsigned int tx_delay_mask; 146df069815SNan Li unsigned int rx_delay_mask; 147df069815SNan Li unsigned int always_on; 14871645e65SJerome Brunet unsigned int adjust; 149df069815SNan Li }; 150df069815SNan Li 15179ed05e3SHeiner Kallweit struct sd_emmc_desc { 15279ed05e3SHeiner Kallweit u32 cmd_cfg; 15379ed05e3SHeiner Kallweit u32 cmd_arg; 15479ed05e3SHeiner Kallweit u32 cmd_data; 15579ed05e3SHeiner Kallweit u32 cmd_resp; 15679ed05e3SHeiner Kallweit }; 15779ed05e3SHeiner Kallweit 15851c5d844SKevin Hilman struct meson_host { 15951c5d844SKevin Hilman struct device *dev; 160df069815SNan Li struct meson_mmc_data *data; 16151c5d844SKevin Hilman struct mmc_host *mmc; 16251c5d844SKevin Hilman struct mmc_command *cmd; 16351c5d844SKevin Hilman 16451c5d844SKevin Hilman void __iomem *regs; 16551c5d844SKevin Hilman struct clk *core_clk; 1665e6f75f4SJerome Brunet struct clk *mux_clk; 167bd911ec4SJerome Brunet struct clk *mmc_clk; 168f89f55dfSJerome Brunet unsigned long req_rate; 169dc38ac81SJerome Brunet bool ddr; 17051c5d844SKevin Hilman 1711e03331dSJerome Brunet struct pinctrl *pinctrl; 1721e03331dSJerome Brunet struct pinctrl_state *pins_default; 1731e03331dSJerome Brunet struct pinctrl_state *pins_clk_gate; 1741e03331dSJerome Brunet 17551c5d844SKevin Hilman unsigned int bounce_buf_size; 17651c5d844SKevin Hilman void *bounce_buf; 17751c5d844SKevin Hilman dma_addr_t bounce_dma_addr; 17879ed05e3SHeiner Kallweit struct sd_emmc_desc *descs; 17979ed05e3SHeiner Kallweit dma_addr_t descs_dma_addr; 18051c5d844SKevin Hilman 181bb364890SRemi Pommarel int irq; 182bb364890SRemi Pommarel 18351c5d844SKevin Hilman bool vqmmc_enabled; 18451c5d844SKevin Hilman }; 18551c5d844SKevin Hilman 1861231e7ebSHeiner Kallweit #define CMD_CFG_LENGTH_MASK GENMASK(8, 0) 18751c5d844SKevin Hilman #define CMD_CFG_BLOCK_MODE BIT(9) 18851c5d844SKevin Hilman #define CMD_CFG_R1B BIT(10) 18951c5d844SKevin Hilman #define CMD_CFG_END_OF_CHAIN BIT(11) 1901231e7ebSHeiner Kallweit #define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12) 19151c5d844SKevin Hilman #define CMD_CFG_NO_RESP BIT(16) 19251c5d844SKevin Hilman #define CMD_CFG_NO_CMD BIT(17) 19351c5d844SKevin Hilman #define CMD_CFG_DATA_IO BIT(18) 19451c5d844SKevin Hilman #define CMD_CFG_DATA_WR BIT(19) 19551c5d844SKevin Hilman #define CMD_CFG_RESP_NOCRC BIT(20) 19651c5d844SKevin Hilman #define CMD_CFG_RESP_128 BIT(21) 19751c5d844SKevin Hilman #define CMD_CFG_RESP_NUM BIT(22) 19851c5d844SKevin Hilman #define CMD_CFG_DATA_NUM BIT(23) 1991231e7ebSHeiner Kallweit #define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24) 20051c5d844SKevin Hilman #define CMD_CFG_ERROR BIT(30) 20151c5d844SKevin Hilman #define CMD_CFG_OWNER BIT(31) 20251c5d844SKevin Hilman 2031231e7ebSHeiner Kallweit #define CMD_DATA_MASK GENMASK(31, 2) 20451c5d844SKevin Hilman #define CMD_DATA_BIG_ENDIAN BIT(1) 20551c5d844SKevin Hilman #define CMD_DATA_SRAM BIT(0) 2061231e7ebSHeiner Kallweit #define CMD_RESP_MASK GENMASK(31, 1) 20751c5d844SKevin Hilman #define CMD_RESP_SRAM BIT(0) 20851c5d844SKevin Hilman 2094eee86c3SHeiner Kallweit static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data) 2104eee86c3SHeiner Kallweit { 2114eee86c3SHeiner Kallweit unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC; 2124eee86c3SHeiner Kallweit 2134eee86c3SHeiner Kallweit if (!timeout) 2144eee86c3SHeiner Kallweit return SD_EMMC_CMD_TIMEOUT_DATA; 2154eee86c3SHeiner Kallweit 2164eee86c3SHeiner Kallweit timeout = roundup_pow_of_two(timeout); 2174eee86c3SHeiner Kallweit 2184eee86c3SHeiner Kallweit return min(timeout, 32768U); /* max. 2^15 ms */ 2194eee86c3SHeiner Kallweit } 2204eee86c3SHeiner Kallweit 221e5e4a3ebSHeiner Kallweit static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd) 222e5e4a3ebSHeiner Kallweit { 223e5e4a3ebSHeiner Kallweit if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) 224e5e4a3ebSHeiner Kallweit return cmd->mrq->cmd; 225e5e4a3ebSHeiner Kallweit else if (mmc_op_multi(cmd->opcode) && 226e5e4a3ebSHeiner Kallweit (!cmd->mrq->sbc || cmd->error || cmd->data->error)) 227e5e4a3ebSHeiner Kallweit return cmd->mrq->stop; 228e5e4a3ebSHeiner Kallweit else 229e5e4a3ebSHeiner Kallweit return NULL; 230e5e4a3ebSHeiner Kallweit } 231e5e4a3ebSHeiner Kallweit 23279ed05e3SHeiner Kallweit static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, 23379ed05e3SHeiner Kallweit struct mmc_request *mrq) 23479ed05e3SHeiner Kallweit { 23579ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 23679ed05e3SHeiner Kallweit struct scatterlist *sg; 23779ed05e3SHeiner Kallweit int i; 23879ed05e3SHeiner Kallweit bool use_desc_chain_mode = true; 23979ed05e3SHeiner Kallweit 24024835611SHeiner Kallweit /* 24124835611SHeiner Kallweit * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been 24224835611SHeiner Kallweit * reported. For some strange reason this occurs in descriptor 24324835611SHeiner Kallweit * chain mode only. So let's fall back to bounce buffer mode 24424835611SHeiner Kallweit * for command SD_IO_RW_EXTENDED. 24524835611SHeiner Kallweit */ 24624835611SHeiner Kallweit if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) 24724835611SHeiner Kallweit return; 24824835611SHeiner Kallweit 24979ed05e3SHeiner Kallweit for_each_sg(data->sg, sg, data->sg_len, i) 25079ed05e3SHeiner Kallweit /* check for 8 byte alignment */ 25179ed05e3SHeiner Kallweit if (sg->offset & 7) { 25279ed05e3SHeiner Kallweit WARN_ONCE(1, "unaligned scatterlist buffer\n"); 25379ed05e3SHeiner Kallweit use_desc_chain_mode = false; 25479ed05e3SHeiner Kallweit break; 25579ed05e3SHeiner Kallweit } 25679ed05e3SHeiner Kallweit 25779ed05e3SHeiner Kallweit if (use_desc_chain_mode) 25879ed05e3SHeiner Kallweit data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE; 25979ed05e3SHeiner Kallweit } 26079ed05e3SHeiner Kallweit 26179ed05e3SHeiner Kallweit static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data) 26279ed05e3SHeiner Kallweit { 26379ed05e3SHeiner Kallweit return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE; 26479ed05e3SHeiner Kallweit } 26579ed05e3SHeiner Kallweit 26679ed05e3SHeiner Kallweit static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data) 26779ed05e3SHeiner Kallweit { 26879ed05e3SHeiner Kallweit return data && data->flags & MMC_DATA_READ && 26979ed05e3SHeiner Kallweit !meson_mmc_desc_chain_mode(data); 27079ed05e3SHeiner Kallweit } 27179ed05e3SHeiner Kallweit 27279ed05e3SHeiner Kallweit static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 27379ed05e3SHeiner Kallweit { 27479ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 27579ed05e3SHeiner Kallweit 27679ed05e3SHeiner Kallweit if (!data) 27779ed05e3SHeiner Kallweit return; 27879ed05e3SHeiner Kallweit 27979ed05e3SHeiner Kallweit meson_mmc_get_transfer_mode(mmc, mrq); 28079ed05e3SHeiner Kallweit data->host_cookie |= SD_EMMC_PRE_REQ_DONE; 28179ed05e3SHeiner Kallweit 28279ed05e3SHeiner Kallweit if (!meson_mmc_desc_chain_mode(data)) 28379ed05e3SHeiner Kallweit return; 28479ed05e3SHeiner Kallweit 28579ed05e3SHeiner Kallweit data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, 28679ed05e3SHeiner Kallweit mmc_get_dma_dir(data)); 28779ed05e3SHeiner Kallweit if (!data->sg_count) 28879ed05e3SHeiner Kallweit dev_err(mmc_dev(mmc), "dma_map_sg failed"); 28979ed05e3SHeiner Kallweit } 29079ed05e3SHeiner Kallweit 29179ed05e3SHeiner Kallweit static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 29279ed05e3SHeiner Kallweit int err) 29379ed05e3SHeiner Kallweit { 29479ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 29579ed05e3SHeiner Kallweit 29679ed05e3SHeiner Kallweit if (data && meson_mmc_desc_chain_mode(data) && data->sg_count) 29779ed05e3SHeiner Kallweit dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 29879ed05e3SHeiner Kallweit mmc_get_dma_dir(data)); 29979ed05e3SHeiner Kallweit } 30079ed05e3SHeiner Kallweit 3011e03331dSJerome Brunet /* 3021e03331dSJerome Brunet * Gating the clock on this controller is tricky. It seems the mmc clock 3031e03331dSJerome Brunet * is also used by the controller. It may crash during some operation if the 3041e03331dSJerome Brunet * clock is stopped. The safest thing to do, whenever possible, is to keep 3051e03331dSJerome Brunet * clock running at stop it at the pad using the pinmux. 3061e03331dSJerome Brunet */ 3071e03331dSJerome Brunet static void meson_mmc_clk_gate(struct meson_host *host) 3081e03331dSJerome Brunet { 3091e03331dSJerome Brunet u32 cfg; 3101e03331dSJerome Brunet 3111e03331dSJerome Brunet if (host->pins_clk_gate) { 3121e03331dSJerome Brunet pinctrl_select_state(host->pinctrl, host->pins_clk_gate); 3131e03331dSJerome Brunet } else { 3141e03331dSJerome Brunet /* 3151e03331dSJerome Brunet * If the pinmux is not provided - default to the classic and 3161e03331dSJerome Brunet * unsafe method 3171e03331dSJerome Brunet */ 3181e03331dSJerome Brunet cfg = readl(host->regs + SD_EMMC_CFG); 3191e03331dSJerome Brunet cfg |= CFG_STOP_CLOCK; 3201e03331dSJerome Brunet writel(cfg, host->regs + SD_EMMC_CFG); 3211e03331dSJerome Brunet } 3221e03331dSJerome Brunet } 3231e03331dSJerome Brunet 3241e03331dSJerome Brunet static void meson_mmc_clk_ungate(struct meson_host *host) 3251e03331dSJerome Brunet { 3261e03331dSJerome Brunet u32 cfg; 3271e03331dSJerome Brunet 3281e03331dSJerome Brunet if (host->pins_clk_gate) 3291e03331dSJerome Brunet pinctrl_select_state(host->pinctrl, host->pins_default); 3301e03331dSJerome Brunet 3311e03331dSJerome Brunet /* Make sure the clock is not stopped in the controller */ 3321e03331dSJerome Brunet cfg = readl(host->regs + SD_EMMC_CFG); 3331e03331dSJerome Brunet cfg &= ~CFG_STOP_CLOCK; 3341e03331dSJerome Brunet writel(cfg, host->regs + SD_EMMC_CFG); 3351e03331dSJerome Brunet } 3361e03331dSJerome Brunet 337dc38ac81SJerome Brunet static int meson_mmc_clk_set(struct meson_host *host, unsigned long rate, 338dc38ac81SJerome Brunet bool ddr) 33951c5d844SKevin Hilman { 34051c5d844SKevin Hilman struct mmc_host *mmc = host->mmc; 3415da86887SHeiner Kallweit int ret; 34251c5d844SKevin Hilman u32 cfg; 34351c5d844SKevin Hilman 344f89f55dfSJerome Brunet /* Same request - bail-out */ 345dc38ac81SJerome Brunet if (host->ddr == ddr && host->req_rate == rate) 34651c5d844SKevin Hilman return 0; 34751c5d844SKevin Hilman 34851c5d844SKevin Hilman /* stop clock */ 3491e03331dSJerome Brunet meson_mmc_clk_gate(host); 350f89f55dfSJerome Brunet host->req_rate = 0; 35151c5d844SKevin Hilman mmc->actual_clock = 0; 352dc38ac81SJerome Brunet 3535da86887SHeiner Kallweit /* return with clock being stopped */ 354dc38ac81SJerome Brunet if (!rate) 35551c5d844SKevin Hilman return 0; 35651c5d844SKevin Hilman 3571e03331dSJerome Brunet /* Stop the clock during rate change to avoid glitches */ 3581e03331dSJerome Brunet cfg = readl(host->regs + SD_EMMC_CFG); 3591e03331dSJerome Brunet cfg |= CFG_STOP_CLOCK; 3601e03331dSJerome Brunet writel(cfg, host->regs + SD_EMMC_CFG); 3611e03331dSJerome Brunet 362dc38ac81SJerome Brunet if (ddr) { 363dc38ac81SJerome Brunet /* DDR modes require higher module clock */ 364dc38ac81SJerome Brunet rate <<= 1; 365dc38ac81SJerome Brunet cfg |= CFG_DDR; 366dc38ac81SJerome Brunet } else { 367dc38ac81SJerome Brunet cfg &= ~CFG_DDR; 368dc38ac81SJerome Brunet } 369dc38ac81SJerome Brunet writel(cfg, host->regs + SD_EMMC_CFG); 370dc38ac81SJerome Brunet host->ddr = ddr; 371dc38ac81SJerome Brunet 372844c8a75SJerome Brunet ret = clk_set_rate(host->mmc_clk, rate); 3735da86887SHeiner Kallweit if (ret) { 3745da86887SHeiner Kallweit dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n", 375844c8a75SJerome Brunet rate, ret); 3765da86887SHeiner Kallweit return ret; 3775da86887SHeiner Kallweit } 37851c5d844SKevin Hilman 379844c8a75SJerome Brunet host->req_rate = rate; 380bd911ec4SJerome Brunet mmc->actual_clock = clk_get_rate(host->mmc_clk); 3815da86887SHeiner Kallweit 382844c8a75SJerome Brunet /* We should report the real output frequency of the controller */ 383dc38ac81SJerome Brunet if (ddr) { 384dc38ac81SJerome Brunet host->req_rate >>= 1; 385844c8a75SJerome Brunet mmc->actual_clock >>= 1; 386dc38ac81SJerome Brunet } 387844c8a75SJerome Brunet 388f89f55dfSJerome Brunet dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock); 389dc38ac81SJerome Brunet if (rate != mmc->actual_clock) 390dc38ac81SJerome Brunet dev_dbg(host->dev, "requested rate was %lu\n", rate); 3915da86887SHeiner Kallweit 3925da86887SHeiner Kallweit /* (re)start clock */ 3931e03331dSJerome Brunet meson_mmc_clk_ungate(host); 39451c5d844SKevin Hilman 3955da86887SHeiner Kallweit return 0; 39651c5d844SKevin Hilman } 39751c5d844SKevin Hilman 39851c5d844SKevin Hilman /* 39951c5d844SKevin Hilman * The SD/eMMC IP block has an internal mux and divider used for 40051c5d844SKevin Hilman * generating the MMC clock. Use the clock framework to create and 40151c5d844SKevin Hilman * manage these clocks. 40251c5d844SKevin Hilman */ 40351c5d844SKevin Hilman static int meson_mmc_clk_init(struct meson_host *host) 40451c5d844SKevin Hilman { 40551c5d844SKevin Hilman struct clk_init_data init; 406bd911ec4SJerome Brunet struct clk_mux *mux; 407bd911ec4SJerome Brunet struct clk_divider *div; 40851c5d844SKevin Hilman char clk_name[32]; 40951c5d844SKevin Hilman int i, ret = 0; 41051c5d844SKevin Hilman const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; 411bd911ec4SJerome Brunet const char *clk_parent[1]; 4123c39e2caSJerome Brunet u32 clk_reg; 41351c5d844SKevin Hilman 414ef5c4815SJerome Brunet /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ 4155e6f75f4SJerome Brunet clk_reg = CLK_ALWAYS_ON(host); 416ef5c4815SJerome Brunet clk_reg |= CLK_DIV_MASK; 4175e6f75f4SJerome Brunet clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180); 4185e6f75f4SJerome Brunet clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0); 4195e6f75f4SJerome Brunet clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0); 420ef5c4815SJerome Brunet writel(clk_reg, host->regs + SD_EMMC_CLOCK); 421ef5c4815SJerome Brunet 42251c5d844SKevin Hilman /* get the mux parents */ 42351c5d844SKevin Hilman for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { 424e9883ef2SHeiner Kallweit struct clk *clk; 42551c5d844SKevin Hilman char name[16]; 42651c5d844SKevin Hilman 42751c5d844SKevin Hilman snprintf(name, sizeof(name), "clkin%d", i); 428e9883ef2SHeiner Kallweit clk = devm_clk_get(host->dev, name); 429e9883ef2SHeiner Kallweit if (IS_ERR(clk)) { 430e9883ef2SHeiner Kallweit if (clk != ERR_PTR(-EPROBE_DEFER)) 43151c5d844SKevin Hilman dev_err(host->dev, "Missing clock %s\n", name); 432e9883ef2SHeiner Kallweit return PTR_ERR(clk); 43351c5d844SKevin Hilman } 43451c5d844SKevin Hilman 435e9883ef2SHeiner Kallweit mux_parent_names[i] = __clk_get_name(clk); 43651c5d844SKevin Hilman } 43751c5d844SKevin Hilman 43851c5d844SKevin Hilman /* create the mux */ 439bd911ec4SJerome Brunet mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL); 440bd911ec4SJerome Brunet if (!mux) 441bd911ec4SJerome Brunet return -ENOMEM; 442bd911ec4SJerome Brunet 44351c5d844SKevin Hilman snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev)); 44451c5d844SKevin Hilman init.name = clk_name; 44551c5d844SKevin Hilman init.ops = &clk_mux_ops; 44651c5d844SKevin Hilman init.flags = 0; 44751c5d844SKevin Hilman init.parent_names = mux_parent_names; 4487558c113SHeiner Kallweit init.num_parents = MUX_CLK_NUM_PARENTS; 44951c5d844SKevin Hilman 450bd911ec4SJerome Brunet mux->reg = host->regs + SD_EMMC_CLOCK; 451795c633fSJerome Brunet mux->shift = __ffs(CLK_SRC_MASK); 452bd911ec4SJerome Brunet mux->mask = CLK_SRC_MASK >> mux->shift; 453bd911ec4SJerome Brunet mux->hw.init = &init; 454bd911ec4SJerome Brunet 4555e6f75f4SJerome Brunet host->mux_clk = devm_clk_register(host->dev, &mux->hw); 4565e6f75f4SJerome Brunet if (WARN_ON(IS_ERR(host->mux_clk))) 4575e6f75f4SJerome Brunet return PTR_ERR(host->mux_clk); 45851c5d844SKevin Hilman 45951c5d844SKevin Hilman /* create the divider */ 460bd911ec4SJerome Brunet div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL); 461bd911ec4SJerome Brunet if (!div) 462bd911ec4SJerome Brunet return -ENOMEM; 463bd911ec4SJerome Brunet 46451c5d844SKevin Hilman snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev)); 4657b9ebad3SHeiner Kallweit init.name = clk_name; 46651c5d844SKevin Hilman init.ops = &clk_divider_ops; 46751c5d844SKevin Hilman init.flags = CLK_SET_RATE_PARENT; 4685e6f75f4SJerome Brunet clk_parent[0] = __clk_get_name(host->mux_clk); 469bd911ec4SJerome Brunet init.parent_names = clk_parent; 470bd911ec4SJerome Brunet init.num_parents = 1; 47151c5d844SKevin Hilman 472bd911ec4SJerome Brunet div->reg = host->regs + SD_EMMC_CLOCK; 473795c633fSJerome Brunet div->shift = __ffs(CLK_DIV_MASK); 474bd911ec4SJerome Brunet div->width = __builtin_popcountl(CLK_DIV_MASK); 475bd911ec4SJerome Brunet div->hw.init = &init; 476ca3dcd3fSJerome Brunet div->flags = CLK_DIVIDER_ONE_BASED; 47751c5d844SKevin Hilman 4785e6f75f4SJerome Brunet host->mmc_clk = devm_clk_register(host->dev, &div->hw); 4795e6f75f4SJerome Brunet if (WARN_ON(IS_ERR(host->mmc_clk))) 480bd911ec4SJerome Brunet return PTR_ERR(host->mmc_clk); 48151c5d844SKevin Hilman 482bd911ec4SJerome Brunet /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ 483bd911ec4SJerome Brunet host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000); 484bd911ec4SJerome Brunet ret = clk_set_rate(host->mmc_clk, host->mmc->f_min); 485a4c38c8dSUlf Hansson if (ret) 486a4c38c8dSUlf Hansson return ret; 48751c5d844SKevin Hilman 488bd911ec4SJerome Brunet return clk_prepare_enable(host->mmc_clk); 48951c5d844SKevin Hilman } 49051c5d844SKevin Hilman 491dc38ac81SJerome Brunet static int meson_mmc_prepare_ios_clock(struct meson_host *host, 492dc38ac81SJerome Brunet struct mmc_ios *ios) 493dc38ac81SJerome Brunet { 494dc38ac81SJerome Brunet bool ddr; 495dc38ac81SJerome Brunet 496dc38ac81SJerome Brunet switch (ios->timing) { 497dc38ac81SJerome Brunet case MMC_TIMING_MMC_DDR52: 498dc38ac81SJerome Brunet case MMC_TIMING_UHS_DDR50: 499dc38ac81SJerome Brunet ddr = true; 500dc38ac81SJerome Brunet break; 501dc38ac81SJerome Brunet 502dc38ac81SJerome Brunet default: 503dc38ac81SJerome Brunet ddr = false; 504dc38ac81SJerome Brunet break; 505dc38ac81SJerome Brunet } 506dc38ac81SJerome Brunet 507dc38ac81SJerome Brunet return meson_mmc_clk_set(host, ios->clock, ddr); 508dc38ac81SJerome Brunet } 509dc38ac81SJerome Brunet 51051c5d844SKevin Hilman static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 51151c5d844SKevin Hilman { 51251c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 513c36cf125SJerome Brunet u32 bus_width, val; 514c36cf125SJerome Brunet int err; 51551c5d844SKevin Hilman 51651c5d844SKevin Hilman /* 51751c5d844SKevin Hilman * GPIO regulator, only controls switching between 1v8 and 51851c5d844SKevin Hilman * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON. 51951c5d844SKevin Hilman */ 52051c5d844SKevin Hilman switch (ios->power_mode) { 52151c5d844SKevin Hilman case MMC_POWER_OFF: 52251c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vmmc)) 52351c5d844SKevin Hilman mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 52451c5d844SKevin Hilman 52551c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { 52651c5d844SKevin Hilman regulator_disable(mmc->supply.vqmmc); 52751c5d844SKevin Hilman host->vqmmc_enabled = false; 52851c5d844SKevin Hilman } 52951c5d844SKevin Hilman 53051c5d844SKevin Hilman break; 53151c5d844SKevin Hilman 53251c5d844SKevin Hilman case MMC_POWER_UP: 53351c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vmmc)) 53451c5d844SKevin Hilman mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 5353e2b0af4SJerome Brunet 53671e3e00cSAndreas Fenkart /* disable signal resampling */ 53771e3e00cSAndreas Fenkart writel(0, host->regs + host->data->adjust); 53871e3e00cSAndreas Fenkart 53951c5d844SKevin Hilman break; 54051c5d844SKevin Hilman 54151c5d844SKevin Hilman case MMC_POWER_ON: 54251c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { 54351c5d844SKevin Hilman int ret = regulator_enable(mmc->supply.vqmmc); 54451c5d844SKevin Hilman 54551c5d844SKevin Hilman if (ret < 0) 546c36cf125SJerome Brunet dev_err(host->dev, 54751c5d844SKevin Hilman "failed to enable vqmmc regulator\n"); 54851c5d844SKevin Hilman else 54951c5d844SKevin Hilman host->vqmmc_enabled = true; 55051c5d844SKevin Hilman } 55151c5d844SKevin Hilman 55251c5d844SKevin Hilman break; 55351c5d844SKevin Hilman } 55451c5d844SKevin Hilman 55551c5d844SKevin Hilman /* Bus width */ 55651c5d844SKevin Hilman switch (ios->bus_width) { 55751c5d844SKevin Hilman case MMC_BUS_WIDTH_1: 55851c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_1; 55951c5d844SKevin Hilman break; 56051c5d844SKevin Hilman case MMC_BUS_WIDTH_4: 56151c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_4; 56251c5d844SKevin Hilman break; 56351c5d844SKevin Hilman case MMC_BUS_WIDTH_8: 56451c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_8; 56551c5d844SKevin Hilman break; 56651c5d844SKevin Hilman default: 56751c5d844SKevin Hilman dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n", 56851c5d844SKevin Hilman ios->bus_width); 56951c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_4; 57051c5d844SKevin Hilman } 57151c5d844SKevin Hilman 57251c5d844SKevin Hilman val = readl(host->regs + SD_EMMC_CFG); 5731231e7ebSHeiner Kallweit val &= ~CFG_BUS_WIDTH_MASK; 5741231e7ebSHeiner Kallweit val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width); 575dc38ac81SJerome Brunet writel(val, host->regs + SD_EMMC_CFG); 57651c5d844SKevin Hilman 577dc38ac81SJerome Brunet err = meson_mmc_prepare_ios_clock(host, ios); 578c36cf125SJerome Brunet if (err) 579c36cf125SJerome Brunet dev_err(host->dev, "Failed to set clock: %d\n,", err); 580c36cf125SJerome Brunet 581c36cf125SJerome Brunet dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val); 582c01d1219SHeiner Kallweit } 58351c5d844SKevin Hilman 5843d6c991bSHeiner Kallweit static void meson_mmc_request_done(struct mmc_host *mmc, 5853d6c991bSHeiner Kallweit struct mmc_request *mrq) 58651c5d844SKevin Hilman { 58751c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 58851c5d844SKevin Hilman 58951c5d844SKevin Hilman host->cmd = NULL; 59051c5d844SKevin Hilman mmc_request_done(host->mmc, mrq); 59151c5d844SKevin Hilman } 59251c5d844SKevin Hilman 5933d03f6a9SHeiner Kallweit static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz) 5943d03f6a9SHeiner Kallweit { 5953d03f6a9SHeiner Kallweit struct meson_host *host = mmc_priv(mmc); 5963d03f6a9SHeiner Kallweit u32 cfg, blksz_old; 5973d03f6a9SHeiner Kallweit 5983d03f6a9SHeiner Kallweit cfg = readl(host->regs + SD_EMMC_CFG); 5993d03f6a9SHeiner Kallweit blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg); 6003d03f6a9SHeiner Kallweit 6013d03f6a9SHeiner Kallweit if (!is_power_of_2(blksz)) 6023d03f6a9SHeiner Kallweit dev_err(host->dev, "blksz %u is not a power of 2\n", blksz); 6033d03f6a9SHeiner Kallweit 6043d03f6a9SHeiner Kallweit blksz = ilog2(blksz); 6053d03f6a9SHeiner Kallweit 6063d03f6a9SHeiner Kallweit /* check if block-size matches, if not update */ 6073d03f6a9SHeiner Kallweit if (blksz == blksz_old) 6083d03f6a9SHeiner Kallweit return; 6093d03f6a9SHeiner Kallweit 6103d03f6a9SHeiner Kallweit dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__, 6113d03f6a9SHeiner Kallweit blksz_old, blksz); 6123d03f6a9SHeiner Kallweit 6133d03f6a9SHeiner Kallweit cfg &= ~CFG_BLK_LEN_MASK; 6143d03f6a9SHeiner Kallweit cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz); 6153d03f6a9SHeiner Kallweit writel(cfg, host->regs + SD_EMMC_CFG); 6163d03f6a9SHeiner Kallweit } 6173d03f6a9SHeiner Kallweit 61875c7fd96SHeiner Kallweit static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg) 61975c7fd96SHeiner Kallweit { 62075c7fd96SHeiner Kallweit if (cmd->flags & MMC_RSP_PRESENT) { 62175c7fd96SHeiner Kallweit if (cmd->flags & MMC_RSP_136) 62275c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_RESP_128; 62375c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_RESP_NUM; 62475c7fd96SHeiner Kallweit 62575c7fd96SHeiner Kallweit if (!(cmd->flags & MMC_RSP_CRC)) 62675c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_RESP_NOCRC; 62775c7fd96SHeiner Kallweit 62875c7fd96SHeiner Kallweit if (cmd->flags & MMC_RSP_BUSY) 62975c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_R1B; 63075c7fd96SHeiner Kallweit } else { 63175c7fd96SHeiner Kallweit *cmd_cfg |= CMD_CFG_NO_RESP; 63275c7fd96SHeiner Kallweit } 63375c7fd96SHeiner Kallweit } 63475c7fd96SHeiner Kallweit 63579ed05e3SHeiner Kallweit static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg) 63679ed05e3SHeiner Kallweit { 63779ed05e3SHeiner Kallweit struct meson_host *host = mmc_priv(mmc); 63879ed05e3SHeiner Kallweit struct sd_emmc_desc *desc = host->descs; 63979ed05e3SHeiner Kallweit struct mmc_data *data = host->cmd->data; 64079ed05e3SHeiner Kallweit struct scatterlist *sg; 64179ed05e3SHeiner Kallweit u32 start; 64279ed05e3SHeiner Kallweit int i; 64379ed05e3SHeiner Kallweit 64479ed05e3SHeiner Kallweit if (data->flags & MMC_DATA_WRITE) 64579ed05e3SHeiner Kallweit cmd_cfg |= CMD_CFG_DATA_WR; 64679ed05e3SHeiner Kallweit 64779ed05e3SHeiner Kallweit if (data->blocks > 1) { 64879ed05e3SHeiner Kallweit cmd_cfg |= CMD_CFG_BLOCK_MODE; 64979ed05e3SHeiner Kallweit meson_mmc_set_blksz(mmc, data->blksz); 65079ed05e3SHeiner Kallweit } 65179ed05e3SHeiner Kallweit 65279ed05e3SHeiner Kallweit for_each_sg(data->sg, sg, data->sg_count, i) { 65379ed05e3SHeiner Kallweit unsigned int len = sg_dma_len(sg); 65479ed05e3SHeiner Kallweit 65579ed05e3SHeiner Kallweit if (data->blocks > 1) 65679ed05e3SHeiner Kallweit len /= data->blksz; 65779ed05e3SHeiner Kallweit 65879ed05e3SHeiner Kallweit desc[i].cmd_cfg = cmd_cfg; 65979ed05e3SHeiner Kallweit desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len); 66079ed05e3SHeiner Kallweit if (i > 0) 66179ed05e3SHeiner Kallweit desc[i].cmd_cfg |= CMD_CFG_NO_CMD; 66279ed05e3SHeiner Kallweit desc[i].cmd_arg = host->cmd->arg; 66379ed05e3SHeiner Kallweit desc[i].cmd_resp = 0; 66479ed05e3SHeiner Kallweit desc[i].cmd_data = sg_dma_address(sg); 66579ed05e3SHeiner Kallweit } 66679ed05e3SHeiner Kallweit desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN; 66779ed05e3SHeiner Kallweit 66879ed05e3SHeiner Kallweit dma_wmb(); /* ensure descriptor is written before kicked */ 66979ed05e3SHeiner Kallweit start = host->descs_dma_addr | START_DESC_BUSY; 67079ed05e3SHeiner Kallweit writel(start, host->regs + SD_EMMC_START); 67179ed05e3SHeiner Kallweit } 67279ed05e3SHeiner Kallweit 67351c5d844SKevin Hilman static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd) 67451c5d844SKevin Hilman { 67551c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 67600412ddcSHeiner Kallweit struct mmc_data *data = cmd->data; 6773d03f6a9SHeiner Kallweit u32 cmd_cfg = 0, cmd_data = 0; 67851c5d844SKevin Hilman unsigned int xfer_bytes = 0; 67951c5d844SKevin Hilman 68051c5d844SKevin Hilman /* Setup descriptors */ 68151c5d844SKevin Hilman dma_rmb(); 68251c5d844SKevin Hilman 68379ed05e3SHeiner Kallweit host->cmd = cmd; 68479ed05e3SHeiner Kallweit 6851231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode); 686a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */ 68718f92bc0SJerome Brunet cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */ 68851c5d844SKevin Hilman 68975c7fd96SHeiner Kallweit meson_mmc_set_response_bits(cmd, &cmd_cfg); 69051c5d844SKevin Hilman 69151c5d844SKevin Hilman /* data? */ 69200412ddcSHeiner Kallweit if (data) { 69379ed05e3SHeiner Kallweit data->bytes_xfered = 0; 694a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_DATA_IO; 6951231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, 6964eee86c3SHeiner Kallweit ilog2(meson_mmc_get_timeout_msecs(data))); 697a744c6feSHeiner Kallweit 69879ed05e3SHeiner Kallweit if (meson_mmc_desc_chain_mode(data)) { 69979ed05e3SHeiner Kallweit meson_mmc_desc_chain_transfer(mmc, cmd_cfg); 70079ed05e3SHeiner Kallweit return; 70179ed05e3SHeiner Kallweit } 70279ed05e3SHeiner Kallweit 70300412ddcSHeiner Kallweit if (data->blocks > 1) { 704a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_BLOCK_MODE; 7051231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, 7061231e7ebSHeiner Kallweit data->blocks); 7073d03f6a9SHeiner Kallweit meson_mmc_set_blksz(mmc, data->blksz); 70851c5d844SKevin Hilman } else { 7091231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz); 71051c5d844SKevin Hilman } 71151c5d844SKevin Hilman 71200412ddcSHeiner Kallweit xfer_bytes = data->blksz * data->blocks; 71300412ddcSHeiner Kallweit if (data->flags & MMC_DATA_WRITE) { 714a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_DATA_WR; 71551c5d844SKevin Hilman WARN_ON(xfer_bytes > host->bounce_buf_size); 71600412ddcSHeiner Kallweit sg_copy_to_buffer(data->sg, data->sg_len, 71751c5d844SKevin Hilman host->bounce_buf, xfer_bytes); 71851c5d844SKevin Hilman dma_wmb(); 71951c5d844SKevin Hilman } 72051c5d844SKevin Hilman 721a322febeSHeiner Kallweit cmd_data = host->bounce_dma_addr & CMD_DATA_MASK; 72251c5d844SKevin Hilman } else { 7231231e7ebSHeiner Kallweit cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, 7241231e7ebSHeiner Kallweit ilog2(SD_EMMC_CMD_TIMEOUT)); 72551c5d844SKevin Hilman } 72651c5d844SKevin Hilman 72751c5d844SKevin Hilman /* Last descriptor */ 728a322febeSHeiner Kallweit cmd_cfg |= CMD_CFG_END_OF_CHAIN; 729a322febeSHeiner Kallweit writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG); 730a322febeSHeiner Kallweit writel(cmd_data, host->regs + SD_EMMC_CMD_DAT); 731a322febeSHeiner Kallweit writel(0, host->regs + SD_EMMC_CMD_RSP); 73251c5d844SKevin Hilman wmb(); /* ensure descriptor is written before kicked */ 733a322febeSHeiner Kallweit writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG); 73451c5d844SKevin Hilman } 73551c5d844SKevin Hilman 73651c5d844SKevin Hilman static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 73751c5d844SKevin Hilman { 73851c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 73979ed05e3SHeiner Kallweit bool needs_pre_post_req = mrq->data && 74079ed05e3SHeiner Kallweit !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); 74179ed05e3SHeiner Kallweit 74279ed05e3SHeiner Kallweit if (needs_pre_post_req) { 74379ed05e3SHeiner Kallweit meson_mmc_get_transfer_mode(mmc, mrq); 74479ed05e3SHeiner Kallweit if (!meson_mmc_desc_chain_mode(mrq->data)) 74579ed05e3SHeiner Kallweit needs_pre_post_req = false; 74679ed05e3SHeiner Kallweit } 74779ed05e3SHeiner Kallweit 74879ed05e3SHeiner Kallweit if (needs_pre_post_req) 74979ed05e3SHeiner Kallweit meson_mmc_pre_req(mmc, mrq); 75051c5d844SKevin Hilman 75151c5d844SKevin Hilman /* Stop execution */ 75251c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_START); 75351c5d844SKevin Hilman 75479ed05e3SHeiner Kallweit meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); 75579ed05e3SHeiner Kallweit 75679ed05e3SHeiner Kallweit if (needs_pre_post_req) 75779ed05e3SHeiner Kallweit meson_mmc_post_req(mmc, mrq, 0); 75851c5d844SKevin Hilman } 75951c5d844SKevin Hilman 7603d6c991bSHeiner Kallweit static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) 76151c5d844SKevin Hilman { 76251c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 76351c5d844SKevin Hilman 76451c5d844SKevin Hilman if (cmd->flags & MMC_RSP_136) { 76551c5d844SKevin Hilman cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3); 76651c5d844SKevin Hilman cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2); 76751c5d844SKevin Hilman cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1); 76851c5d844SKevin Hilman cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP); 76951c5d844SKevin Hilman } else if (cmd->flags & MMC_RSP_PRESENT) { 77051c5d844SKevin Hilman cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP); 77151c5d844SKevin Hilman } 77251c5d844SKevin Hilman } 77351c5d844SKevin Hilman 77451c5d844SKevin Hilman static irqreturn_t meson_mmc_irq(int irq, void *dev_id) 77551c5d844SKevin Hilman { 77651c5d844SKevin Hilman struct meson_host *host = dev_id; 77719a91dd4SHeinrich Schuchardt struct mmc_command *cmd; 7782c8d96a4SHeiner Kallweit struct mmc_data *data; 77951c5d844SKevin Hilman u32 irq_en, status, raw_status; 78074858655SJerome Brunet irqreturn_t ret = IRQ_NONE; 78151c5d844SKevin Hilman 78218f92bc0SJerome Brunet irq_en = readl(host->regs + SD_EMMC_IRQ_EN); 78318f92bc0SJerome Brunet raw_status = readl(host->regs + SD_EMMC_STATUS); 78418f92bc0SJerome Brunet status = raw_status & irq_en; 78518f92bc0SJerome Brunet 78618f92bc0SJerome Brunet if (!status) { 78718f92bc0SJerome Brunet dev_dbg(host->dev, 78818f92bc0SJerome Brunet "Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n", 78918f92bc0SJerome Brunet irq_en, raw_status); 79018f92bc0SJerome Brunet return IRQ_NONE; 79118f92bc0SJerome Brunet } 79218f92bc0SJerome Brunet 79374858655SJerome Brunet if (WARN_ON(!host) || WARN_ON(!host->cmd)) 79451c5d844SKevin Hilman return IRQ_NONE; 79551c5d844SKevin Hilman 79674858655SJerome Brunet cmd = host->cmd; 79774858655SJerome Brunet data = cmd->data; 79874858655SJerome Brunet cmd->error = 0; 79974858655SJerome Brunet if (status & IRQ_CRC_ERR) { 80074858655SJerome Brunet dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status); 80174858655SJerome Brunet cmd->error = -EILSEQ; 80218f92bc0SJerome Brunet ret = IRQ_WAKE_THREAD; 80374858655SJerome Brunet goto out; 80474858655SJerome Brunet } 80574858655SJerome Brunet 80674858655SJerome Brunet if (status & IRQ_TIMEOUTS) { 80774858655SJerome Brunet dev_dbg(host->dev, "Timeout - status 0x%08x\n", status); 80874858655SJerome Brunet cmd->error = -ETIMEDOUT; 80918f92bc0SJerome Brunet ret = IRQ_WAKE_THREAD; 81051c5d844SKevin Hilman goto out; 81151c5d844SKevin Hilman } 81251c5d844SKevin Hilman 8131f8066d9SHeiner Kallweit meson_mmc_read_resp(host->mmc, cmd); 8141f8066d9SHeiner Kallweit 81574858655SJerome Brunet if (status & IRQ_SDIO) { 81674858655SJerome Brunet dev_dbg(host->dev, "IRQ: SDIO TODO.\n"); 81774858655SJerome Brunet ret = IRQ_HANDLED; 81851c5d844SKevin Hilman } 81951c5d844SKevin Hilman 8202c8d96a4SHeiner Kallweit if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { 8212c8d96a4SHeiner Kallweit if (data && !cmd->error) 8222c8d96a4SHeiner Kallweit data->bytes_xfered = data->blksz * data->blocks; 82379ed05e3SHeiner Kallweit if (meson_mmc_bounce_buf_read(data) || 82479ed05e3SHeiner Kallweit meson_mmc_get_next_command(cmd)) 82551c5d844SKevin Hilman ret = IRQ_WAKE_THREAD; 82674858655SJerome Brunet else 82774858655SJerome Brunet ret = IRQ_HANDLED; 82851c5d844SKevin Hilman } 82951c5d844SKevin Hilman 83051c5d844SKevin Hilman out: 83118f92bc0SJerome Brunet if (cmd->error) { 83218f92bc0SJerome Brunet /* Stop desc in case of errors */ 83318f92bc0SJerome Brunet u32 start = readl(host->regs + SD_EMMC_START); 83418f92bc0SJerome Brunet 83518f92bc0SJerome Brunet start &= ~START_DESC_BUSY; 83618f92bc0SJerome Brunet writel(start, host->regs + SD_EMMC_START); 83718f92bc0SJerome Brunet } 83818f92bc0SJerome Brunet 8391f8066d9SHeiner Kallweit if (ret == IRQ_HANDLED) 84051c5d844SKevin Hilman meson_mmc_request_done(host->mmc, cmd->mrq); 84151c5d844SKevin Hilman 8429c5fdb07SJerome Brunet /* ack all raised interrupts */ 8439c5fdb07SJerome Brunet writel(status, host->regs + SD_EMMC_STATUS); 8449c5fdb07SJerome Brunet 84551c5d844SKevin Hilman return ret; 84651c5d844SKevin Hilman } 84751c5d844SKevin Hilman 84818f92bc0SJerome Brunet static int meson_mmc_wait_desc_stop(struct meson_host *host) 84918f92bc0SJerome Brunet { 85018f92bc0SJerome Brunet u32 status; 85118f92bc0SJerome Brunet 85218f92bc0SJerome Brunet /* 85318f92bc0SJerome Brunet * It may sometimes take a while for it to actually halt. Here, we 85418f92bc0SJerome Brunet * are giving it 5ms to comply 85518f92bc0SJerome Brunet * 85618f92bc0SJerome Brunet * If we don't confirm the descriptor is stopped, it might raise new 85718f92bc0SJerome Brunet * IRQs after we have called mmc_request_done() which is bad. 85818f92bc0SJerome Brunet */ 85918f92bc0SJerome Brunet 86098849da6SJerome Brunet return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status, 86198849da6SJerome Brunet !(status & (STATUS_BUSY | STATUS_DESC_BUSY)), 86298849da6SJerome Brunet 100, 5000); 86318f92bc0SJerome Brunet } 86418f92bc0SJerome Brunet 86551c5d844SKevin Hilman static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) 86651c5d844SKevin Hilman { 86751c5d844SKevin Hilman struct meson_host *host = dev_id; 868e5e4a3ebSHeiner Kallweit struct mmc_command *next_cmd, *cmd = host->cmd; 86951c5d844SKevin Hilman struct mmc_data *data; 87051c5d844SKevin Hilman unsigned int xfer_bytes; 87151c5d844SKevin Hilman 87251c5d844SKevin Hilman if (WARN_ON(!cmd)) 87319a91dd4SHeinrich Schuchardt return IRQ_NONE; 87451c5d844SKevin Hilman 87518f92bc0SJerome Brunet if (cmd->error) { 87618f92bc0SJerome Brunet meson_mmc_wait_desc_stop(host); 87718f92bc0SJerome Brunet meson_mmc_request_done(host->mmc, cmd->mrq); 87818f92bc0SJerome Brunet 87918f92bc0SJerome Brunet return IRQ_HANDLED; 88018f92bc0SJerome Brunet } 88118f92bc0SJerome Brunet 88251c5d844SKevin Hilman data = cmd->data; 88379ed05e3SHeiner Kallweit if (meson_mmc_bounce_buf_read(data)) { 88451c5d844SKevin Hilman xfer_bytes = data->blksz * data->blocks; 88551c5d844SKevin Hilman WARN_ON(xfer_bytes > host->bounce_buf_size); 88651c5d844SKevin Hilman sg_copy_from_buffer(data->sg, data->sg_len, 88751c5d844SKevin Hilman host->bounce_buf, xfer_bytes); 88851c5d844SKevin Hilman } 88951c5d844SKevin Hilman 890e5e4a3ebSHeiner Kallweit next_cmd = meson_mmc_get_next_command(cmd); 891e5e4a3ebSHeiner Kallweit if (next_cmd) 892e5e4a3ebSHeiner Kallweit meson_mmc_start_cmd(host->mmc, next_cmd); 89351c5d844SKevin Hilman else 894e5e4a3ebSHeiner Kallweit meson_mmc_request_done(host->mmc, cmd->mrq); 89551c5d844SKevin Hilman 896690f90b6SHeiner Kallweit return IRQ_HANDLED; 89751c5d844SKevin Hilman } 89851c5d844SKevin Hilman 89951c5d844SKevin Hilman /* 90051c5d844SKevin Hilman * NOTE: we only need this until the GPIO/pinctrl driver can handle 90151c5d844SKevin Hilman * interrupts. For now, the MMC core will use this for polling. 90251c5d844SKevin Hilman */ 90351c5d844SKevin Hilman static int meson_mmc_get_cd(struct mmc_host *mmc) 90451c5d844SKevin Hilman { 90551c5d844SKevin Hilman int status = mmc_gpio_get_cd(mmc); 90651c5d844SKevin Hilman 90751c5d844SKevin Hilman if (status == -ENOSYS) 90851c5d844SKevin Hilman return 1; /* assume present */ 90951c5d844SKevin Hilman 91051c5d844SKevin Hilman return status; 91151c5d844SKevin Hilman } 91251c5d844SKevin Hilman 913c01d1219SHeiner Kallweit static void meson_mmc_cfg_init(struct meson_host *host) 914c01d1219SHeiner Kallweit { 91571e3e00cSAndreas Fenkart u32 cfg = 0; 916c01d1219SHeiner Kallweit 9171231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, 9181231e7ebSHeiner Kallweit ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); 9191231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP)); 9201231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE)); 921c01d1219SHeiner Kallweit 92218f92bc0SJerome Brunet /* abort chain on R/W errors */ 92318f92bc0SJerome Brunet cfg |= CFG_ERR_ABORT; 92418f92bc0SJerome Brunet 925c01d1219SHeiner Kallweit writel(cfg, host->regs + SD_EMMC_CFG); 926c01d1219SHeiner Kallweit } 927c01d1219SHeiner Kallweit 928186cd8b7SJerome Brunet static int meson_mmc_card_busy(struct mmc_host *mmc) 929186cd8b7SJerome Brunet { 930186cd8b7SJerome Brunet struct meson_host *host = mmc_priv(mmc); 931186cd8b7SJerome Brunet u32 regval; 932186cd8b7SJerome Brunet 933186cd8b7SJerome Brunet regval = readl(host->regs + SD_EMMC_STATUS); 934186cd8b7SJerome Brunet 935186cd8b7SJerome Brunet /* We are only interrested in lines 0 to 3, so mask the other ones */ 936186cd8b7SJerome Brunet return !(FIELD_GET(STATUS_DATI, regval) & 0xf); 937186cd8b7SJerome Brunet } 938186cd8b7SJerome Brunet 939b1231b2fSJerome Brunet static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) 940b1231b2fSJerome Brunet { 941b1231b2fSJerome Brunet /* vqmmc regulator is available */ 942b1231b2fSJerome Brunet if (!IS_ERR(mmc->supply.vqmmc)) { 943b1231b2fSJerome Brunet /* 944b1231b2fSJerome Brunet * The usual amlogic setup uses a GPIO to switch from one 945b1231b2fSJerome Brunet * regulator to the other. While the voltage ramp up is 946b1231b2fSJerome Brunet * pretty fast, care must be taken when switching from 3.3v 947b1231b2fSJerome Brunet * to 1.8v. Please make sure the regulator framework is aware 948b1231b2fSJerome Brunet * of your own regulator constraints 949b1231b2fSJerome Brunet */ 950b1231b2fSJerome Brunet return mmc_regulator_set_vqmmc(mmc, ios); 951b1231b2fSJerome Brunet } 952b1231b2fSJerome Brunet 953b1231b2fSJerome Brunet /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ 954b1231b2fSJerome Brunet if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 955b1231b2fSJerome Brunet return 0; 956b1231b2fSJerome Brunet 957b1231b2fSJerome Brunet return -EINVAL; 958b1231b2fSJerome Brunet } 959b1231b2fSJerome Brunet 96051c5d844SKevin Hilman static const struct mmc_host_ops meson_mmc_ops = { 96151c5d844SKevin Hilman .request = meson_mmc_request, 96251c5d844SKevin Hilman .set_ios = meson_mmc_set_ios, 96351c5d844SKevin Hilman .get_cd = meson_mmc_get_cd, 96479ed05e3SHeiner Kallweit .pre_req = meson_mmc_pre_req, 96579ed05e3SHeiner Kallweit .post_req = meson_mmc_post_req, 966186cd8b7SJerome Brunet .card_busy = meson_mmc_card_busy, 967b1231b2fSJerome Brunet .start_signal_voltage_switch = meson_mmc_voltage_switch, 96851c5d844SKevin Hilman }; 96951c5d844SKevin Hilman 97051c5d844SKevin Hilman static int meson_mmc_probe(struct platform_device *pdev) 97151c5d844SKevin Hilman { 97251c5d844SKevin Hilman struct resource *res; 97351c5d844SKevin Hilman struct meson_host *host; 97451c5d844SKevin Hilman struct mmc_host *mmc; 975bb364890SRemi Pommarel int ret; 97651c5d844SKevin Hilman 97751c5d844SKevin Hilman mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); 97851c5d844SKevin Hilman if (!mmc) 97951c5d844SKevin Hilman return -ENOMEM; 98051c5d844SKevin Hilman host = mmc_priv(mmc); 98151c5d844SKevin Hilman host->mmc = mmc; 98251c5d844SKevin Hilman host->dev = &pdev->dev; 98351c5d844SKevin Hilman dev_set_drvdata(&pdev->dev, host); 98451c5d844SKevin Hilman 98551c5d844SKevin Hilman /* Get regulators and the supported OCR mask */ 98651c5d844SKevin Hilman host->vqmmc_enabled = false; 98751c5d844SKevin Hilman ret = mmc_regulator_get_supply(mmc); 988fa54f3e3SWolfram Sang if (ret) 98951c5d844SKevin Hilman goto free_host; 99051c5d844SKevin Hilman 99151c5d844SKevin Hilman ret = mmc_of_parse(mmc); 99251c5d844SKevin Hilman if (ret) { 993dc012058SKevin Hilman if (ret != -EPROBE_DEFER) 99451c5d844SKevin Hilman dev_warn(&pdev->dev, "error parsing DT: %d\n", ret); 99551c5d844SKevin Hilman goto free_host; 99651c5d844SKevin Hilman } 99751c5d844SKevin Hilman 998df069815SNan Li host->data = (struct meson_mmc_data *) 999df069815SNan Li of_device_get_match_data(&pdev->dev); 1000df069815SNan Li if (!host->data) { 1001df069815SNan Li ret = -EINVAL; 1002df069815SNan Li goto free_host; 1003df069815SNan Li } 1004df069815SNan Li 100519c6beaaSJerome Brunet ret = device_reset_optional(&pdev->dev); 100619c6beaaSJerome Brunet if (ret) { 100719c6beaaSJerome Brunet if (ret != -EPROBE_DEFER) 100819c6beaaSJerome Brunet dev_err(&pdev->dev, "device reset failed: %d\n", ret); 100919c6beaaSJerome Brunet 101019c6beaaSJerome Brunet return ret; 101119c6beaaSJerome Brunet } 101219c6beaaSJerome Brunet 101351c5d844SKevin Hilman res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 101451c5d844SKevin Hilman host->regs = devm_ioremap_resource(&pdev->dev, res); 101551c5d844SKevin Hilman if (IS_ERR(host->regs)) { 101651c5d844SKevin Hilman ret = PTR_ERR(host->regs); 101751c5d844SKevin Hilman goto free_host; 101851c5d844SKevin Hilman } 101951c5d844SKevin Hilman 1020bb364890SRemi Pommarel host->irq = platform_get_irq(pdev, 0); 1021bb364890SRemi Pommarel if (host->irq <= 0) { 102251c5d844SKevin Hilman dev_err(&pdev->dev, "failed to get interrupt resource.\n"); 102351c5d844SKevin Hilman ret = -EINVAL; 102451c5d844SKevin Hilman goto free_host; 102551c5d844SKevin Hilman } 102651c5d844SKevin Hilman 10271e03331dSJerome Brunet host->pinctrl = devm_pinctrl_get(&pdev->dev); 10281e03331dSJerome Brunet if (IS_ERR(host->pinctrl)) { 10291e03331dSJerome Brunet ret = PTR_ERR(host->pinctrl); 10301e03331dSJerome Brunet goto free_host; 10311e03331dSJerome Brunet } 10321e03331dSJerome Brunet 10331e03331dSJerome Brunet host->pins_default = pinctrl_lookup_state(host->pinctrl, 10341e03331dSJerome Brunet PINCTRL_STATE_DEFAULT); 10351e03331dSJerome Brunet if (IS_ERR(host->pins_default)) { 10361e03331dSJerome Brunet ret = PTR_ERR(host->pins_default); 10371e03331dSJerome Brunet goto free_host; 10381e03331dSJerome Brunet } 10391e03331dSJerome Brunet 10401e03331dSJerome Brunet host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, 10411e03331dSJerome Brunet "clk-gate"); 10421e03331dSJerome Brunet if (IS_ERR(host->pins_clk_gate)) { 10431e03331dSJerome Brunet dev_warn(&pdev->dev, 10441e03331dSJerome Brunet "can't get clk-gate pinctrl, using clk_stop bit\n"); 10451e03331dSJerome Brunet host->pins_clk_gate = NULL; 10461e03331dSJerome Brunet } 10471e03331dSJerome Brunet 104851c5d844SKevin Hilman host->core_clk = devm_clk_get(&pdev->dev, "core"); 104951c5d844SKevin Hilman if (IS_ERR(host->core_clk)) { 105051c5d844SKevin Hilman ret = PTR_ERR(host->core_clk); 105151c5d844SKevin Hilman goto free_host; 105251c5d844SKevin Hilman } 105351c5d844SKevin Hilman 105451c5d844SKevin Hilman ret = clk_prepare_enable(host->core_clk); 105551c5d844SKevin Hilman if (ret) 105651c5d844SKevin Hilman goto free_host; 105751c5d844SKevin Hilman 105851c5d844SKevin Hilman ret = meson_mmc_clk_init(host); 105951c5d844SKevin Hilman if (ret) 1060ce473d5bSMichał Zegan goto err_core_clk; 106151c5d844SKevin Hilman 10623c39e2caSJerome Brunet /* set config to sane default */ 10633c39e2caSJerome Brunet meson_mmc_cfg_init(host); 10643c39e2caSJerome Brunet 106551c5d844SKevin Hilman /* Stop execution */ 106651c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_START); 106751c5d844SKevin Hilman 106874858655SJerome Brunet /* clear, ack and enable interrupts */ 106951c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_IRQ_EN); 107074858655SJerome Brunet writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, 107174858655SJerome Brunet host->regs + SD_EMMC_STATUS); 107274858655SJerome Brunet writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, 107374858655SJerome Brunet host->regs + SD_EMMC_IRQ_EN); 107451c5d844SKevin Hilman 1075bb364890SRemi Pommarel ret = request_threaded_irq(host->irq, meson_mmc_irq, 1076eb4d8112SJerome Brunet meson_mmc_irq_thread, IRQF_ONESHOT, 107783e418a8SMartin Blumenstingl dev_name(&pdev->dev), host); 107851c5d844SKevin Hilman if (ret) 1079bd911ec4SJerome Brunet goto err_init_clk; 108051c5d844SKevin Hilman 1081e5e4a3ebSHeiner Kallweit mmc->caps |= MMC_CAP_CMD23; 1082efe0b669SHeiner Kallweit mmc->max_blk_count = CMD_CFG_LENGTH_MASK; 1083efe0b669SHeiner Kallweit mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size; 108479ed05e3SHeiner Kallweit mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc); 108579ed05e3SHeiner Kallweit mmc->max_seg_size = mmc->max_req_size; 1086efe0b669SHeiner Kallweit 1087d5f758f2SJerome Brunet /* 1088d5f758f2SJerome Brunet * At the moment, we don't know how to reliably enable HS400. 1089d5f758f2SJerome Brunet * From the different datasheets, it is not even clear if this mode 1090d5f758f2SJerome Brunet * is officially supported by any of the SoCs 1091d5f758f2SJerome Brunet */ 1092d5f758f2SJerome Brunet mmc->caps2 &= ~MMC_CAP2_HS400; 1093d5f758f2SJerome Brunet 109451c5d844SKevin Hilman /* data bounce buffer */ 10954136fcb5SHeiner Kallweit host->bounce_buf_size = mmc->max_req_size; 109651c5d844SKevin Hilman host->bounce_buf = 109751c5d844SKevin Hilman dma_alloc_coherent(host->dev, host->bounce_buf_size, 109851c5d844SKevin Hilman &host->bounce_dma_addr, GFP_KERNEL); 109951c5d844SKevin Hilman if (host->bounce_buf == NULL) { 110051c5d844SKevin Hilman dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); 110151c5d844SKevin Hilman ret = -ENOMEM; 1102bb364890SRemi Pommarel goto err_free_irq; 110351c5d844SKevin Hilman } 110451c5d844SKevin Hilman 110579ed05e3SHeiner Kallweit host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 110679ed05e3SHeiner Kallweit &host->descs_dma_addr, GFP_KERNEL); 110779ed05e3SHeiner Kallweit if (!host->descs) { 110879ed05e3SHeiner Kallweit dev_err(host->dev, "Allocating descriptor DMA buffer failed\n"); 110979ed05e3SHeiner Kallweit ret = -ENOMEM; 111079ed05e3SHeiner Kallweit goto err_bounce_buf; 111179ed05e3SHeiner Kallweit } 111279ed05e3SHeiner Kallweit 111351c5d844SKevin Hilman mmc->ops = &meson_mmc_ops; 111451c5d844SKevin Hilman mmc_add_host(mmc); 111551c5d844SKevin Hilman 111651c5d844SKevin Hilman return 0; 111751c5d844SKevin Hilman 111879ed05e3SHeiner Kallweit err_bounce_buf: 111979ed05e3SHeiner Kallweit dma_free_coherent(host->dev, host->bounce_buf_size, 112079ed05e3SHeiner Kallweit host->bounce_buf, host->bounce_dma_addr); 1121bb364890SRemi Pommarel err_free_irq: 1122bb364890SRemi Pommarel free_irq(host->irq, host); 1123bd911ec4SJerome Brunet err_init_clk: 1124bd911ec4SJerome Brunet clk_disable_unprepare(host->mmc_clk); 1125ce473d5bSMichał Zegan err_core_clk: 112651c5d844SKevin Hilman clk_disable_unprepare(host->core_clk); 1127ce473d5bSMichał Zegan free_host: 112851c5d844SKevin Hilman mmc_free_host(mmc); 112951c5d844SKevin Hilman return ret; 113051c5d844SKevin Hilman } 113151c5d844SKevin Hilman 113251c5d844SKevin Hilman static int meson_mmc_remove(struct platform_device *pdev) 113351c5d844SKevin Hilman { 113451c5d844SKevin Hilman struct meson_host *host = dev_get_drvdata(&pdev->dev); 113551c5d844SKevin Hilman 1136a01fc2a2SMichał Zegan mmc_remove_host(host->mmc); 1137a01fc2a2SMichał Zegan 113892763b99SHeiner Kallweit /* disable interrupts */ 113992763b99SHeiner Kallweit writel(0, host->regs + SD_EMMC_IRQ_EN); 1140bb364890SRemi Pommarel free_irq(host->irq, host); 114192763b99SHeiner Kallweit 114279ed05e3SHeiner Kallweit dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 114379ed05e3SHeiner Kallweit host->descs, host->descs_dma_addr); 114451c5d844SKevin Hilman dma_free_coherent(host->dev, host->bounce_buf_size, 114551c5d844SKevin Hilman host->bounce_buf, host->bounce_dma_addr); 114651c5d844SKevin Hilman 1147bd911ec4SJerome Brunet clk_disable_unprepare(host->mmc_clk); 114851c5d844SKevin Hilman clk_disable_unprepare(host->core_clk); 114951c5d844SKevin Hilman 115051c5d844SKevin Hilman mmc_free_host(host->mmc); 115151c5d844SKevin Hilman return 0; 115251c5d844SKevin Hilman } 115351c5d844SKevin Hilman 1154df069815SNan Li static const struct meson_mmc_data meson_gx_data = { 1155df069815SNan Li .tx_delay_mask = CLK_V2_TX_DELAY_MASK, 1156df069815SNan Li .rx_delay_mask = CLK_V2_RX_DELAY_MASK, 1157df069815SNan Li .always_on = CLK_V2_ALWAYS_ON, 115871645e65SJerome Brunet .adjust = SD_EMMC_ADJUST, 1159df069815SNan Li }; 1160df069815SNan Li 1161df069815SNan Li static const struct meson_mmc_data meson_axg_data = { 1162df069815SNan Li .tx_delay_mask = CLK_V3_TX_DELAY_MASK, 1163df069815SNan Li .rx_delay_mask = CLK_V3_RX_DELAY_MASK, 1164df069815SNan Li .always_on = CLK_V3_ALWAYS_ON, 116571645e65SJerome Brunet .adjust = SD_EMMC_V3_ADJUST, 1166df069815SNan Li }; 1167df069815SNan Li 116851c5d844SKevin Hilman static const struct of_device_id meson_mmc_of_match[] = { 1169df069815SNan Li { .compatible = "amlogic,meson-gx-mmc", .data = &meson_gx_data }, 1170df069815SNan Li { .compatible = "amlogic,meson-gxbb-mmc", .data = &meson_gx_data }, 1171df069815SNan Li { .compatible = "amlogic,meson-gxl-mmc", .data = &meson_gx_data }, 1172df069815SNan Li { .compatible = "amlogic,meson-gxm-mmc", .data = &meson_gx_data }, 1173df069815SNan Li { .compatible = "amlogic,meson-axg-mmc", .data = &meson_axg_data }, 117451c5d844SKevin Hilman {} 117551c5d844SKevin Hilman }; 117651c5d844SKevin Hilman MODULE_DEVICE_TABLE(of, meson_mmc_of_match); 117751c5d844SKevin Hilman 117851c5d844SKevin Hilman static struct platform_driver meson_mmc_driver = { 117951c5d844SKevin Hilman .probe = meson_mmc_probe, 118051c5d844SKevin Hilman .remove = meson_mmc_remove, 118151c5d844SKevin Hilman .driver = { 118251c5d844SKevin Hilman .name = DRIVER_NAME, 118351c5d844SKevin Hilman .of_match_table = of_match_ptr(meson_mmc_of_match), 118451c5d844SKevin Hilman }, 118551c5d844SKevin Hilman }; 118651c5d844SKevin Hilman 118751c5d844SKevin Hilman module_platform_driver(meson_mmc_driver); 118851c5d844SKevin Hilman 1189e79dc1b4SNan Li MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver"); 119051c5d844SKevin Hilman MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>"); 119151c5d844SKevin Hilman MODULE_LICENSE("GPL v2"); 1192