151c5d844SKevin Hilman /* 251c5d844SKevin Hilman * Amlogic SD/eMMC driver for the GX/S905 family SoCs 351c5d844SKevin Hilman * 451c5d844SKevin Hilman * Copyright (c) 2016 BayLibre, SAS. 551c5d844SKevin Hilman * Author: Kevin Hilman <khilman@baylibre.com> 651c5d844SKevin Hilman * 751c5d844SKevin Hilman * This program is free software; you can redistribute it and/or modify 851c5d844SKevin Hilman * it under the terms of version 2 of the GNU General Public License as 951c5d844SKevin Hilman * published by the Free Software Foundation. 1051c5d844SKevin Hilman * 1151c5d844SKevin Hilman * This program is distributed in the hope that it will be useful, but 1251c5d844SKevin Hilman * WITHOUT ANY WARRANTY; without even the implied warranty of 1351c5d844SKevin Hilman * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1451c5d844SKevin Hilman * General Public License for more details. 1551c5d844SKevin Hilman * 1651c5d844SKevin Hilman * You should have received a copy of the GNU General Public License 1751c5d844SKevin Hilman * along with this program; if not, see <http://www.gnu.org/licenses/>. 1851c5d844SKevin Hilman * The full GNU General Public License is included in this distribution 1951c5d844SKevin Hilman * in the file called COPYING. 
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>

#define DRIVER_NAME "meson-gx-mmc"

/* Clock control register: source mux, divider and sampling phases */
#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_MASK GENMASK(5, 0)
#define CLK_DIV_MAX 63
#define CLK_SRC_MASK GENMASK(7, 6)
#define CLK_SRC_XTAL 0 /* external crystal */
#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
#define CLK_PHASE_0 0
#define CLK_PHASE_90 1
#define CLK_PHASE_180 2
#define CLK_PHASE_270 3
#define CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10

/* Descriptor-chain start register: kicks off descriptor execution */
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_MASK GENMASK(31, 2)

/* Controller configuration: bus width, block length, timeouts, clock gating */
#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_MASK GENMASK(7, 4)
#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define CFG_RC_CC_MASK GENMASK(15, 12)
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_CHK_DS BIT(20)
#define CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)

/* Interrupt enable bits; same layout is used by the status register */
#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_EN_MASK GENMASK(13, 0)
#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)

/* Single-descriptor registers, mirroring struct sd_emmc_desc below */
#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

/* Driver-private flags kept in mmc_data->host_cookie */
#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

/* Sampling/driving phase selection programmed into SD_EMMC_CLOCK */
struct meson_tuning_params {
	u8 core_phase;
	u8 tx_phase;
	u8 rx_phase;
};

/* One hardware DMA descriptor; layout mirrors SD_EMMC_CMD_* registers */
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

/* Per-controller driver state */
struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_command *cmd; /* command currently in flight, NULL if idle */

	spinlock_t lock;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk_mux mux; /* clock source mux inside SD_EMMC_CLOCK */
	struct clk *mux_clk;
	unsigned long current_clock; /* last rate requested via clk_set */

	struct clk_divider cfg_div; /* clock divider inside SD_EMMC_CLOCK */
	struct clk *cfg_div_clk;

	/* bounce buffer used when the scatterlist is not 8-byte aligned */
	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs; /* descriptor chain buffer */
	dma_addr_t descs_dma_addr;

	struct meson_tuning_params tp;
	bool vqmmc_enabled;
};

/* Bit layout of sd_emmc_desc.cmd_cfg / SD_EMMC_CMD_CFG */
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

/* Bit layout of sd_emmc_desc.cmd_data: DMA address plus control bits */
#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
CMD_DATA_SRAM BIT(0) 1771231e7ebSHeiner Kallweit #define CMD_RESP_MASK GENMASK(31, 1) 17851c5d844SKevin Hilman #define CMD_RESP_SRAM BIT(0) 17951c5d844SKevin Hilman 1804eee86c3SHeiner Kallweit static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data) 1814eee86c3SHeiner Kallweit { 1824eee86c3SHeiner Kallweit unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC; 1834eee86c3SHeiner Kallweit 1844eee86c3SHeiner Kallweit if (!timeout) 1854eee86c3SHeiner Kallweit return SD_EMMC_CMD_TIMEOUT_DATA; 1864eee86c3SHeiner Kallweit 1874eee86c3SHeiner Kallweit timeout = roundup_pow_of_two(timeout); 1884eee86c3SHeiner Kallweit 1894eee86c3SHeiner Kallweit return min(timeout, 32768U); /* max. 2^15 ms */ 1904eee86c3SHeiner Kallweit } 1914eee86c3SHeiner Kallweit 192e5e4a3ebSHeiner Kallweit static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd) 193e5e4a3ebSHeiner Kallweit { 194e5e4a3ebSHeiner Kallweit if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) 195e5e4a3ebSHeiner Kallweit return cmd->mrq->cmd; 196e5e4a3ebSHeiner Kallweit else if (mmc_op_multi(cmd->opcode) && 197e5e4a3ebSHeiner Kallweit (!cmd->mrq->sbc || cmd->error || cmd->data->error)) 198e5e4a3ebSHeiner Kallweit return cmd->mrq->stop; 199e5e4a3ebSHeiner Kallweit else 200e5e4a3ebSHeiner Kallweit return NULL; 201e5e4a3ebSHeiner Kallweit } 202e5e4a3ebSHeiner Kallweit 20379ed05e3SHeiner Kallweit static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, 20479ed05e3SHeiner Kallweit struct mmc_request *mrq) 20579ed05e3SHeiner Kallweit { 20679ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 20779ed05e3SHeiner Kallweit struct scatterlist *sg; 20879ed05e3SHeiner Kallweit int i; 20979ed05e3SHeiner Kallweit bool use_desc_chain_mode = true; 21079ed05e3SHeiner Kallweit 21124835611SHeiner Kallweit /* 21224835611SHeiner Kallweit * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been 21324835611SHeiner Kallweit * reported. 
For some strange reason this occurs in descriptor 21424835611SHeiner Kallweit * chain mode only. So let's fall back to bounce buffer mode 21524835611SHeiner Kallweit * for command SD_IO_RW_EXTENDED. 21624835611SHeiner Kallweit */ 21724835611SHeiner Kallweit if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) 21824835611SHeiner Kallweit return; 21924835611SHeiner Kallweit 22079ed05e3SHeiner Kallweit for_each_sg(data->sg, sg, data->sg_len, i) 22179ed05e3SHeiner Kallweit /* check for 8 byte alignment */ 22279ed05e3SHeiner Kallweit if (sg->offset & 7) { 22379ed05e3SHeiner Kallweit WARN_ONCE(1, "unaligned scatterlist buffer\n"); 22479ed05e3SHeiner Kallweit use_desc_chain_mode = false; 22579ed05e3SHeiner Kallweit break; 22679ed05e3SHeiner Kallweit } 22779ed05e3SHeiner Kallweit 22879ed05e3SHeiner Kallweit if (use_desc_chain_mode) 22979ed05e3SHeiner Kallweit data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE; 23079ed05e3SHeiner Kallweit } 23179ed05e3SHeiner Kallweit 23279ed05e3SHeiner Kallweit static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data) 23379ed05e3SHeiner Kallweit { 23479ed05e3SHeiner Kallweit return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE; 23579ed05e3SHeiner Kallweit } 23679ed05e3SHeiner Kallweit 23779ed05e3SHeiner Kallweit static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data) 23879ed05e3SHeiner Kallweit { 23979ed05e3SHeiner Kallweit return data && data->flags & MMC_DATA_READ && 24079ed05e3SHeiner Kallweit !meson_mmc_desc_chain_mode(data); 24179ed05e3SHeiner Kallweit } 24279ed05e3SHeiner Kallweit 24379ed05e3SHeiner Kallweit static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 24479ed05e3SHeiner Kallweit { 24579ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 24679ed05e3SHeiner Kallweit 24779ed05e3SHeiner Kallweit if (!data) 24879ed05e3SHeiner Kallweit return; 24979ed05e3SHeiner Kallweit 25079ed05e3SHeiner Kallweit meson_mmc_get_transfer_mode(mmc, mrq); 25179ed05e3SHeiner Kallweit data->host_cookie |= 
SD_EMMC_PRE_REQ_DONE; 25279ed05e3SHeiner Kallweit 25379ed05e3SHeiner Kallweit if (!meson_mmc_desc_chain_mode(data)) 25479ed05e3SHeiner Kallweit return; 25579ed05e3SHeiner Kallweit 25679ed05e3SHeiner Kallweit data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, 25779ed05e3SHeiner Kallweit mmc_get_dma_dir(data)); 25879ed05e3SHeiner Kallweit if (!data->sg_count) 25979ed05e3SHeiner Kallweit dev_err(mmc_dev(mmc), "dma_map_sg failed"); 26079ed05e3SHeiner Kallweit } 26179ed05e3SHeiner Kallweit 26279ed05e3SHeiner Kallweit static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 26379ed05e3SHeiner Kallweit int err) 26479ed05e3SHeiner Kallweit { 26579ed05e3SHeiner Kallweit struct mmc_data *data = mrq->data; 26679ed05e3SHeiner Kallweit 26779ed05e3SHeiner Kallweit if (data && meson_mmc_desc_chain_mode(data) && data->sg_count) 26879ed05e3SHeiner Kallweit dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 26979ed05e3SHeiner Kallweit mmc_get_dma_dir(data)); 27079ed05e3SHeiner Kallweit } 27179ed05e3SHeiner Kallweit 27251c5d844SKevin Hilman static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate) 27351c5d844SKevin Hilman { 27451c5d844SKevin Hilman struct mmc_host *mmc = host->mmc; 2755da86887SHeiner Kallweit int ret; 27651c5d844SKevin Hilman u32 cfg; 27751c5d844SKevin Hilman 27851c5d844SKevin Hilman if (clk_rate) { 27951c5d844SKevin Hilman if (WARN_ON(clk_rate > mmc->f_max)) 28051c5d844SKevin Hilman clk_rate = mmc->f_max; 28151c5d844SKevin Hilman else if (WARN_ON(clk_rate < mmc->f_min)) 28251c5d844SKevin Hilman clk_rate = mmc->f_min; 28351c5d844SKevin Hilman } 28451c5d844SKevin Hilman 2855da86887SHeiner Kallweit if (clk_rate == host->current_clock) 28651c5d844SKevin Hilman return 0; 28751c5d844SKevin Hilman 28851c5d844SKevin Hilman /* stop clock */ 28951c5d844SKevin Hilman cfg = readl(host->regs + SD_EMMC_CFG); 29051c5d844SKevin Hilman if (!(cfg & CFG_STOP_CLOCK)) { 29151c5d844SKevin Hilman cfg |= CFG_STOP_CLOCK; 
29251c5d844SKevin Hilman writel(cfg, host->regs + SD_EMMC_CFG); 29351c5d844SKevin Hilman } 29451c5d844SKevin Hilman 29551c5d844SKevin Hilman dev_dbg(host->dev, "change clock rate %u -> %lu\n", 29651c5d844SKevin Hilman mmc->actual_clock, clk_rate); 29751c5d844SKevin Hilman 2985da86887SHeiner Kallweit if (!clk_rate) { 29951c5d844SKevin Hilman mmc->actual_clock = 0; 3005da86887SHeiner Kallweit host->current_clock = 0; 3015da86887SHeiner Kallweit /* return with clock being stopped */ 30251c5d844SKevin Hilman return 0; 30351c5d844SKevin Hilman } 30451c5d844SKevin Hilman 30551c5d844SKevin Hilman ret = clk_set_rate(host->cfg_div_clk, clk_rate); 3065da86887SHeiner Kallweit if (ret) { 3075da86887SHeiner Kallweit dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n", 30851c5d844SKevin Hilman clk_rate, ret); 3095da86887SHeiner Kallweit return ret; 3105da86887SHeiner Kallweit } 31151c5d844SKevin Hilman 3125da86887SHeiner Kallweit mmc->actual_clock = clk_get_rate(host->cfg_div_clk); 3135da86887SHeiner Kallweit host->current_clock = clk_rate; 3145da86887SHeiner Kallweit 3155da86887SHeiner Kallweit if (clk_rate != mmc->actual_clock) 3165da86887SHeiner Kallweit dev_dbg(host->dev, 3175da86887SHeiner Kallweit "divider requested rate %lu != actual rate %u\n", 3185da86887SHeiner Kallweit clk_rate, mmc->actual_clock); 3195da86887SHeiner Kallweit 3205da86887SHeiner Kallweit /* (re)start clock */ 32151c5d844SKevin Hilman cfg = readl(host->regs + SD_EMMC_CFG); 32251c5d844SKevin Hilman cfg &= ~CFG_STOP_CLOCK; 32351c5d844SKevin Hilman writel(cfg, host->regs + SD_EMMC_CFG); 32451c5d844SKevin Hilman 3255da86887SHeiner Kallweit return 0; 32651c5d844SKevin Hilman } 32751c5d844SKevin Hilman 32851c5d844SKevin Hilman /* 32951c5d844SKevin Hilman * The SD/eMMC IP block has an internal mux and divider used for 33051c5d844SKevin Hilman * generating the MMC clock. Use the clock framework to create and 33151c5d844SKevin Hilman * manage these clocks. 
33251c5d844SKevin Hilman */ 33351c5d844SKevin Hilman static int meson_mmc_clk_init(struct meson_host *host) 33451c5d844SKevin Hilman { 33551c5d844SKevin Hilman struct clk_init_data init; 33651c5d844SKevin Hilman char clk_name[32]; 33751c5d844SKevin Hilman int i, ret = 0; 33851c5d844SKevin Hilman const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; 33951c5d844SKevin Hilman const char *clk_div_parents[1]; 3403c39e2caSJerome Brunet u32 clk_reg; 34151c5d844SKevin Hilman 342ef5c4815SJerome Brunet /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ 343ef5c4815SJerome Brunet clk_reg = 0; 344ef5c4815SJerome Brunet clk_reg |= CLK_ALWAYS_ON; 345ef5c4815SJerome Brunet clk_reg |= CLK_DIV_MASK; 346ef5c4815SJerome Brunet clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); 347ef5c4815SJerome Brunet clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); 348ef5c4815SJerome Brunet clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); 349ef5c4815SJerome Brunet writel(clk_reg, host->regs + SD_EMMC_CLOCK); 350ef5c4815SJerome Brunet 35151c5d844SKevin Hilman /* get the mux parents */ 35251c5d844SKevin Hilman for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { 353e9883ef2SHeiner Kallweit struct clk *clk; 35451c5d844SKevin Hilman char name[16]; 35551c5d844SKevin Hilman 35651c5d844SKevin Hilman snprintf(name, sizeof(name), "clkin%d", i); 357e9883ef2SHeiner Kallweit clk = devm_clk_get(host->dev, name); 358e9883ef2SHeiner Kallweit if (IS_ERR(clk)) { 359e9883ef2SHeiner Kallweit if (clk != ERR_PTR(-EPROBE_DEFER)) 36051c5d844SKevin Hilman dev_err(host->dev, "Missing clock %s\n", name); 361e9883ef2SHeiner Kallweit return PTR_ERR(clk); 36251c5d844SKevin Hilman } 36351c5d844SKevin Hilman 364e9883ef2SHeiner Kallweit mux_parent_names[i] = __clk_get_name(clk); 36551c5d844SKevin Hilman } 36651c5d844SKevin Hilman 36751c5d844SKevin Hilman /* create the mux */ 36851c5d844SKevin Hilman snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev)); 36951c5d844SKevin Hilman 
init.name = clk_name; 37051c5d844SKevin Hilman init.ops = &clk_mux_ops; 37151c5d844SKevin Hilman init.flags = 0; 37251c5d844SKevin Hilman init.parent_names = mux_parent_names; 3737558c113SHeiner Kallweit init.num_parents = MUX_CLK_NUM_PARENTS; 37451c5d844SKevin Hilman host->mux.reg = host->regs + SD_EMMC_CLOCK; 3751231e7ebSHeiner Kallweit host->mux.shift = __bf_shf(CLK_SRC_MASK); 376c1d04caaSJerome Brunet host->mux.mask = CLK_SRC_MASK >> host->mux.shift; 37751c5d844SKevin Hilman host->mux.flags = 0; 37851c5d844SKevin Hilman host->mux.table = NULL; 37951c5d844SKevin Hilman host->mux.hw.init = &init; 38051c5d844SKevin Hilman 38151c5d844SKevin Hilman host->mux_clk = devm_clk_register(host->dev, &host->mux.hw); 38251c5d844SKevin Hilman if (WARN_ON(IS_ERR(host->mux_clk))) 38351c5d844SKevin Hilman return PTR_ERR(host->mux_clk); 38451c5d844SKevin Hilman 38551c5d844SKevin Hilman /* create the divider */ 38651c5d844SKevin Hilman snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev)); 3877b9ebad3SHeiner Kallweit init.name = clk_name; 38851c5d844SKevin Hilman init.ops = &clk_divider_ops; 38951c5d844SKevin Hilman init.flags = CLK_SET_RATE_PARENT; 39051c5d844SKevin Hilman clk_div_parents[0] = __clk_get_name(host->mux_clk); 39151c5d844SKevin Hilman init.parent_names = clk_div_parents; 39251c5d844SKevin Hilman init.num_parents = ARRAY_SIZE(clk_div_parents); 39351c5d844SKevin Hilman 39451c5d844SKevin Hilman host->cfg_div.reg = host->regs + SD_EMMC_CLOCK; 3951231e7ebSHeiner Kallweit host->cfg_div.shift = __bf_shf(CLK_DIV_MASK); 3961231e7ebSHeiner Kallweit host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK); 39751c5d844SKevin Hilman host->cfg_div.hw.init = &init; 39851c5d844SKevin Hilman host->cfg_div.flags = CLK_DIVIDER_ONE_BASED | 399130b4bd8SJerome Brunet CLK_DIVIDER_ROUND_CLOSEST; 40051c5d844SKevin Hilman 40151c5d844SKevin Hilman host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw); 40251c5d844SKevin Hilman if 
(WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk))) 40351c5d844SKevin Hilman return PTR_ERR(host->cfg_div_clk); 40451c5d844SKevin Hilman 40551c5d844SKevin Hilman ret = clk_prepare_enable(host->cfg_div_clk); 406a4c38c8dSUlf Hansson if (ret) 407a4c38c8dSUlf Hansson return ret; 40851c5d844SKevin Hilman 409a4c38c8dSUlf Hansson /* Get the nearest minimum clock to 400KHz */ 410a4c38c8dSUlf Hansson host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000); 411a4c38c8dSUlf Hansson 412a4c38c8dSUlf Hansson ret = meson_mmc_clk_set(host, host->mmc->f_min); 413cac3a478SHeiner Kallweit if (ret) 41451c5d844SKevin Hilman clk_disable_unprepare(host->cfg_div_clk); 41551c5d844SKevin Hilman 41651c5d844SKevin Hilman return ret; 41751c5d844SKevin Hilman } 41851c5d844SKevin Hilman 4190b6ed71cSHeiner Kallweit static void meson_mmc_set_tuning_params(struct mmc_host *mmc) 4200b6ed71cSHeiner Kallweit { 4210b6ed71cSHeiner Kallweit struct meson_host *host = mmc_priv(mmc); 4220b6ed71cSHeiner Kallweit u32 regval; 4230b6ed71cSHeiner Kallweit 4240b6ed71cSHeiner Kallweit /* stop clock */ 4250b6ed71cSHeiner Kallweit regval = readl(host->regs + SD_EMMC_CFG); 4260b6ed71cSHeiner Kallweit regval |= CFG_STOP_CLOCK; 4270b6ed71cSHeiner Kallweit writel(regval, host->regs + SD_EMMC_CFG); 4280b6ed71cSHeiner Kallweit 4290b6ed71cSHeiner Kallweit regval = readl(host->regs + SD_EMMC_CLOCK); 4300b6ed71cSHeiner Kallweit regval &= ~CLK_CORE_PHASE_MASK; 4310b6ed71cSHeiner Kallweit regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); 4320b6ed71cSHeiner Kallweit regval &= ~CLK_TX_PHASE_MASK; 4330b6ed71cSHeiner Kallweit regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); 4340b6ed71cSHeiner Kallweit regval &= ~CLK_RX_PHASE_MASK; 4350b6ed71cSHeiner Kallweit regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); 4360b6ed71cSHeiner Kallweit writel(regval, host->regs + SD_EMMC_CLOCK); 4370b6ed71cSHeiner Kallweit 4380b6ed71cSHeiner Kallweit /* start clock */ 4390b6ed71cSHeiner Kallweit regval = 
readl(host->regs + SD_EMMC_CFG); 4400b6ed71cSHeiner Kallweit regval &= ~CFG_STOP_CLOCK; 4410b6ed71cSHeiner Kallweit writel(regval, host->regs + SD_EMMC_CFG); 4420b6ed71cSHeiner Kallweit } 4430b6ed71cSHeiner Kallweit 44451c5d844SKevin Hilman static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 44551c5d844SKevin Hilman { 44651c5d844SKevin Hilman struct meson_host *host = mmc_priv(mmc); 447c36cf125SJerome Brunet u32 bus_width, val; 448c36cf125SJerome Brunet int err; 44951c5d844SKevin Hilman 45051c5d844SKevin Hilman /* 45151c5d844SKevin Hilman * GPIO regulator, only controls switching between 1v8 and 45251c5d844SKevin Hilman * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON. 45351c5d844SKevin Hilman */ 45451c5d844SKevin Hilman switch (ios->power_mode) { 45551c5d844SKevin Hilman case MMC_POWER_OFF: 45651c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vmmc)) 45751c5d844SKevin Hilman mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 45851c5d844SKevin Hilman 45951c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { 46051c5d844SKevin Hilman regulator_disable(mmc->supply.vqmmc); 46151c5d844SKevin Hilman host->vqmmc_enabled = false; 46251c5d844SKevin Hilman } 46351c5d844SKevin Hilman 46451c5d844SKevin Hilman break; 46551c5d844SKevin Hilman 46651c5d844SKevin Hilman case MMC_POWER_UP: 46751c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vmmc)) 46851c5d844SKevin Hilman mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 46951c5d844SKevin Hilman break; 47051c5d844SKevin Hilman 47151c5d844SKevin Hilman case MMC_POWER_ON: 47251c5d844SKevin Hilman if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { 47351c5d844SKevin Hilman int ret = regulator_enable(mmc->supply.vqmmc); 47451c5d844SKevin Hilman 47551c5d844SKevin Hilman if (ret < 0) 476c36cf125SJerome Brunet dev_err(host->dev, 47751c5d844SKevin Hilman "failed to enable vqmmc regulator\n"); 47851c5d844SKevin Hilman else 47951c5d844SKevin Hilman host->vqmmc_enabled = true; 48051c5d844SKevin 
Hilman } 48151c5d844SKevin Hilman 48251c5d844SKevin Hilman break; 48351c5d844SKevin Hilman } 48451c5d844SKevin Hilman 48551c5d844SKevin Hilman /* Bus width */ 48651c5d844SKevin Hilman switch (ios->bus_width) { 48751c5d844SKevin Hilman case MMC_BUS_WIDTH_1: 48851c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_1; 48951c5d844SKevin Hilman break; 49051c5d844SKevin Hilman case MMC_BUS_WIDTH_4: 49151c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_4; 49251c5d844SKevin Hilman break; 49351c5d844SKevin Hilman case MMC_BUS_WIDTH_8: 49451c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_8; 49551c5d844SKevin Hilman break; 49651c5d844SKevin Hilman default: 49751c5d844SKevin Hilman dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n", 49851c5d844SKevin Hilman ios->bus_width); 49951c5d844SKevin Hilman bus_width = CFG_BUS_WIDTH_4; 50051c5d844SKevin Hilman } 50151c5d844SKevin Hilman 50251c5d844SKevin Hilman val = readl(host->regs + SD_EMMC_CFG); 5031231e7ebSHeiner Kallweit val &= ~CFG_BUS_WIDTH_MASK; 5041231e7ebSHeiner Kallweit val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width); 50551c5d844SKevin Hilman 506e21e6fddSHeiner Kallweit val &= ~CFG_DDR; 507e21e6fddSHeiner Kallweit if (ios->timing == MMC_TIMING_UHS_DDR50 || 508e21e6fddSHeiner Kallweit ios->timing == MMC_TIMING_MMC_DDR52 || 509e21e6fddSHeiner Kallweit ios->timing == MMC_TIMING_MMC_HS400) 510e21e6fddSHeiner Kallweit val |= CFG_DDR; 511e21e6fddSHeiner Kallweit 512e21e6fddSHeiner Kallweit val &= ~CFG_CHK_DS; 513e21e6fddSHeiner Kallweit if (ios->timing == MMC_TIMING_MMC_HS400) 514e21e6fddSHeiner Kallweit val |= CFG_CHK_DS; 515e21e6fddSHeiner Kallweit 516c36cf125SJerome Brunet err = meson_mmc_clk_set(host, ios->clock); 517c36cf125SJerome Brunet if (err) 518c36cf125SJerome Brunet dev_err(host->dev, "Failed to set clock: %d\n,", err); 519c36cf125SJerome Brunet 52051c5d844SKevin Hilman writel(val, host->regs + SD_EMMC_CFG); 521c36cf125SJerome Brunet dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val); 522c01d1219SHeiner Kallweit } 
/* Complete @mrq and mark the host idle again */
static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}

/*
 * Program the controller block length (stored as log2 of the size in
 * CFG_BLK_LEN_MASK). Only writes the register if the value changed.
 */
static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	/* the hardware field holds log2(blksz), so blksz must be a power of 2 */
	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}

/*
 * Translate the MMC response flags of @cmd into the descriptor
 * cmd_cfg response bits (128-bit response, CRC check, R1B busy wait,
 * or no response at all).
 */
static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}

/*
 * Build one descriptor per scatterlist entry (sharing @cmd_cfg, with
 * only the first descriptor carrying the command itself) and kick the
 * chain off via SD_EMMC_START. The scatterlist must already be
 * DMA-mapped (see meson_mmc_pre_req).
 */
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		/* in block mode the length field counts blocks, not bytes */
		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		/* only the first descriptor sends the command on the bus */
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}
/*
 * Issue @cmd to the controller. Data transfers use either descriptor
 * chain DMA (pre-mapped scatterlist) or the single-descriptor
 * registers with the bounce buffer; writes are copied into the bounce
 * buffer here, reads are copied out on completion (not visible in
 * this chunk).
 */
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		/* timeout field stores log2 of the timeout in ms */
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			/* block mode: length counts blocks, not bytes */
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			/* stage write data in the DMA-able bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	/* writing CMD_ARG starts execution of the descriptor */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}

/*
 * mmc_host_ops::request - entry point for a new request. Performs
 * pre_req/post_req inline when the core did not use the async API.
 */
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	/*
	 * NOTE(review): post_req (dma_unmap_sg) is called here right
	 * after the transfer was kicked off, i.e. while the descriptor
	 * chain may still be doing DMA. Verify against the completion
	 * path (not visible in this chunk) that this is safe.
	 */
	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}

/*
 * Read the command response from the response registers.
 * (Function continues beyond the end of this chunk.)
 */
static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
Hilman cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1); 70751c5d844SKevin Hilman cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP); 70851c5d844SKevin Hilman } else if (cmd->flags & MMC_RSP_PRESENT) { 70951c5d844SKevin Hilman cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP); 71051c5d844SKevin Hilman } 71151c5d844SKevin Hilman } 71251c5d844SKevin Hilman 71351c5d844SKevin Hilman static irqreturn_t meson_mmc_irq(int irq, void *dev_id) 71451c5d844SKevin Hilman { 71551c5d844SKevin Hilman struct meson_host *host = dev_id; 71619a91dd4SHeinrich Schuchardt struct mmc_command *cmd; 7172c8d96a4SHeiner Kallweit struct mmc_data *data; 71851c5d844SKevin Hilman u32 irq_en, status, raw_status; 71951c5d844SKevin Hilman irqreturn_t ret = IRQ_HANDLED; 72051c5d844SKevin Hilman 72151c5d844SKevin Hilman if (WARN_ON(!host)) 72251c5d844SKevin Hilman return IRQ_NONE; 72351c5d844SKevin Hilman 72419a91dd4SHeinrich Schuchardt cmd = host->cmd; 72519a91dd4SHeinrich Schuchardt 72651c5d844SKevin Hilman if (WARN_ON(!cmd)) 72751c5d844SKevin Hilman return IRQ_NONE; 72851c5d844SKevin Hilman 7292c8d96a4SHeiner Kallweit data = cmd->data; 7302c8d96a4SHeiner Kallweit 73151c5d844SKevin Hilman spin_lock(&host->lock); 73251c5d844SKevin Hilman irq_en = readl(host->regs + SD_EMMC_IRQ_EN); 73351c5d844SKevin Hilman raw_status = readl(host->regs + SD_EMMC_STATUS); 73451c5d844SKevin Hilman status = raw_status & irq_en; 73551c5d844SKevin Hilman 73651c5d844SKevin Hilman if (!status) { 73751c5d844SKevin Hilman dev_warn(host->dev, "Spurious IRQ! 
status=0x%08x, irq_en=0x%08x\n", 73851c5d844SKevin Hilman raw_status, irq_en); 73951c5d844SKevin Hilman ret = IRQ_NONE; 74051c5d844SKevin Hilman goto out; 74151c5d844SKevin Hilman } 74251c5d844SKevin Hilman 7431f8066d9SHeiner Kallweit meson_mmc_read_resp(host->mmc, cmd); 7441f8066d9SHeiner Kallweit 74551c5d844SKevin Hilman cmd->error = 0; 74651c5d844SKevin Hilman if (status & IRQ_RXD_ERR_MASK) { 74751c5d844SKevin Hilman dev_dbg(host->dev, "Unhandled IRQ: RXD error\n"); 74851c5d844SKevin Hilman cmd->error = -EILSEQ; 74951c5d844SKevin Hilman } 75051c5d844SKevin Hilman if (status & IRQ_TXD_ERR) { 75151c5d844SKevin Hilman dev_dbg(host->dev, "Unhandled IRQ: TXD error\n"); 75251c5d844SKevin Hilman cmd->error = -EILSEQ; 75351c5d844SKevin Hilman } 75451c5d844SKevin Hilman if (status & IRQ_DESC_ERR) 75551c5d844SKevin Hilman dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n"); 75651c5d844SKevin Hilman if (status & IRQ_RESP_ERR) { 75751c5d844SKevin Hilman dev_dbg(host->dev, "Unhandled IRQ: Response error\n"); 75851c5d844SKevin Hilman cmd->error = -EILSEQ; 75951c5d844SKevin Hilman } 76051c5d844SKevin Hilman if (status & IRQ_RESP_TIMEOUT) { 76151c5d844SKevin Hilman dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n"); 76251c5d844SKevin Hilman cmd->error = -ETIMEDOUT; 76351c5d844SKevin Hilman } 76451c5d844SKevin Hilman if (status & IRQ_DESC_TIMEOUT) { 76551c5d844SKevin Hilman dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n"); 76651c5d844SKevin Hilman cmd->error = -ETIMEDOUT; 76751c5d844SKevin Hilman } 76851c5d844SKevin Hilman if (status & IRQ_SDIO) 76951c5d844SKevin Hilman dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n"); 77051c5d844SKevin Hilman 7712c8d96a4SHeiner Kallweit if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { 7722c8d96a4SHeiner Kallweit if (data && !cmd->error) 7732c8d96a4SHeiner Kallweit data->bytes_xfered = data->blksz * data->blocks; 77479ed05e3SHeiner Kallweit if (meson_mmc_bounce_buf_read(data) || 77579ed05e3SHeiner Kallweit 
meson_mmc_get_next_command(cmd)) 77651c5d844SKevin Hilman ret = IRQ_WAKE_THREAD; 7772c8d96a4SHeiner Kallweit } else { 77851c5d844SKevin Hilman dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n", 77951c5d844SKevin Hilman status, cmd->opcode, cmd->arg, 7807cdcc480SHeiner Kallweit cmd->flags, cmd->mrq->stop ? 1 : 0); 78151c5d844SKevin Hilman if (cmd->data) { 78251c5d844SKevin Hilman struct mmc_data *data = cmd->data; 78351c5d844SKevin Hilman 78451c5d844SKevin Hilman dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)", 78551c5d844SKevin Hilman data->blksz, data->blocks, data->flags, 78651c5d844SKevin Hilman data->flags & MMC_DATA_WRITE ? "write" : "", 78751c5d844SKevin Hilman data->flags & MMC_DATA_READ ? "read" : ""); 78851c5d844SKevin Hilman } 78951c5d844SKevin Hilman } 79051c5d844SKevin Hilman 79151c5d844SKevin Hilman out: 79251c5d844SKevin Hilman /* ack all (enabled) interrupts */ 79351c5d844SKevin Hilman writel(status, host->regs + SD_EMMC_STATUS); 79451c5d844SKevin Hilman 7951f8066d9SHeiner Kallweit if (ret == IRQ_HANDLED) 79651c5d844SKevin Hilman meson_mmc_request_done(host->mmc, cmd->mrq); 79751c5d844SKevin Hilman 79851c5d844SKevin Hilman spin_unlock(&host->lock); 79951c5d844SKevin Hilman return ret; 80051c5d844SKevin Hilman } 80151c5d844SKevin Hilman 80251c5d844SKevin Hilman static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) 80351c5d844SKevin Hilman { 80451c5d844SKevin Hilman struct meson_host *host = dev_id; 805e5e4a3ebSHeiner Kallweit struct mmc_command *next_cmd, *cmd = host->cmd; 80651c5d844SKevin Hilman struct mmc_data *data; 80751c5d844SKevin Hilman unsigned int xfer_bytes; 80851c5d844SKevin Hilman 80951c5d844SKevin Hilman if (WARN_ON(!cmd)) 81019a91dd4SHeinrich Schuchardt return IRQ_NONE; 81151c5d844SKevin Hilman 81251c5d844SKevin Hilman data = cmd->data; 81379ed05e3SHeiner Kallweit if (meson_mmc_bounce_buf_read(data)) { 81451c5d844SKevin Hilman xfer_bytes = data->blksz * data->blocks; 
81551c5d844SKevin Hilman WARN_ON(xfer_bytes > host->bounce_buf_size); 81651c5d844SKevin Hilman sg_copy_from_buffer(data->sg, data->sg_len, 81751c5d844SKevin Hilman host->bounce_buf, xfer_bytes); 81851c5d844SKevin Hilman } 81951c5d844SKevin Hilman 820e5e4a3ebSHeiner Kallweit next_cmd = meson_mmc_get_next_command(cmd); 821e5e4a3ebSHeiner Kallweit if (next_cmd) 822e5e4a3ebSHeiner Kallweit meson_mmc_start_cmd(host->mmc, next_cmd); 82351c5d844SKevin Hilman else 824e5e4a3ebSHeiner Kallweit meson_mmc_request_done(host->mmc, cmd->mrq); 82551c5d844SKevin Hilman 826690f90b6SHeiner Kallweit return IRQ_HANDLED; 82751c5d844SKevin Hilman } 82851c5d844SKevin Hilman 8290b6ed71cSHeiner Kallweit static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 8300b6ed71cSHeiner Kallweit { 8310b6ed71cSHeiner Kallweit struct meson_host *host = mmc_priv(mmc); 8320b6ed71cSHeiner Kallweit struct meson_tuning_params tp_old = host->tp; 8330b6ed71cSHeiner Kallweit int ret = -EINVAL, i, cmd_error; 8340b6ed71cSHeiner Kallweit 8350b6ed71cSHeiner Kallweit dev_info(mmc_dev(mmc), "(re)tuning...\n"); 8360b6ed71cSHeiner Kallweit 8370b6ed71cSHeiner Kallweit for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) { 8380b6ed71cSHeiner Kallweit host->tp.rx_phase = i; 8390b6ed71cSHeiner Kallweit /* exclude the active parameter set if retuning */ 8400b6ed71cSHeiner Kallweit if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) && 8410b6ed71cSHeiner Kallweit mmc->doing_retune) 8420b6ed71cSHeiner Kallweit continue; 8430b6ed71cSHeiner Kallweit meson_mmc_set_tuning_params(mmc); 8440b6ed71cSHeiner Kallweit ret = mmc_send_tuning(mmc, opcode, &cmd_error); 8450b6ed71cSHeiner Kallweit if (!ret) 8460b6ed71cSHeiner Kallweit break; 8470b6ed71cSHeiner Kallweit } 8480b6ed71cSHeiner Kallweit 8490b6ed71cSHeiner Kallweit return ret; 8500b6ed71cSHeiner Kallweit } 8510b6ed71cSHeiner Kallweit 85251c5d844SKevin Hilman /* 85351c5d844SKevin Hilman * NOTE: we only need this until the GPIO/pinctrl driver can handle 85451c5d844SKevin 
Hilman * interrupts. For now, the MMC core will use this for polling. 85551c5d844SKevin Hilman */ 85651c5d844SKevin Hilman static int meson_mmc_get_cd(struct mmc_host *mmc) 85751c5d844SKevin Hilman { 85851c5d844SKevin Hilman int status = mmc_gpio_get_cd(mmc); 85951c5d844SKevin Hilman 86051c5d844SKevin Hilman if (status == -ENOSYS) 86151c5d844SKevin Hilman return 1; /* assume present */ 86251c5d844SKevin Hilman 86351c5d844SKevin Hilman return status; 86451c5d844SKevin Hilman } 86551c5d844SKevin Hilman 866c01d1219SHeiner Kallweit static void meson_mmc_cfg_init(struct meson_host *host) 867c01d1219SHeiner Kallweit { 868c01d1219SHeiner Kallweit u32 cfg = 0; 869c01d1219SHeiner Kallweit 8701231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, 8711231e7ebSHeiner Kallweit ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); 8721231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP)); 8731231e7ebSHeiner Kallweit cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE)); 874c01d1219SHeiner Kallweit 875c01d1219SHeiner Kallweit writel(cfg, host->regs + SD_EMMC_CFG); 876c01d1219SHeiner Kallweit } 877c01d1219SHeiner Kallweit 87851c5d844SKevin Hilman static const struct mmc_host_ops meson_mmc_ops = { 87951c5d844SKevin Hilman .request = meson_mmc_request, 88051c5d844SKevin Hilman .set_ios = meson_mmc_set_ios, 88151c5d844SKevin Hilman .get_cd = meson_mmc_get_cd, 88279ed05e3SHeiner Kallweit .pre_req = meson_mmc_pre_req, 88379ed05e3SHeiner Kallweit .post_req = meson_mmc_post_req, 8840b6ed71cSHeiner Kallweit .execute_tuning = meson_mmc_execute_tuning, 88551c5d844SKevin Hilman }; 88651c5d844SKevin Hilman 88751c5d844SKevin Hilman static int meson_mmc_probe(struct platform_device *pdev) 88851c5d844SKevin Hilman { 88951c5d844SKevin Hilman struct resource *res; 89051c5d844SKevin Hilman struct meson_host *host; 89151c5d844SKevin Hilman struct mmc_host *mmc; 8929a1da4dfSHeiner Kallweit int ret, irq; 89351c5d844SKevin Hilman 89451c5d844SKevin Hilman mmc = 
mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); 89551c5d844SKevin Hilman if (!mmc) 89651c5d844SKevin Hilman return -ENOMEM; 89751c5d844SKevin Hilman host = mmc_priv(mmc); 89851c5d844SKevin Hilman host->mmc = mmc; 89951c5d844SKevin Hilman host->dev = &pdev->dev; 90051c5d844SKevin Hilman dev_set_drvdata(&pdev->dev, host); 90151c5d844SKevin Hilman 90251c5d844SKevin Hilman spin_lock_init(&host->lock); 90351c5d844SKevin Hilman 90451c5d844SKevin Hilman /* Get regulators and the supported OCR mask */ 90551c5d844SKevin Hilman host->vqmmc_enabled = false; 90651c5d844SKevin Hilman ret = mmc_regulator_get_supply(mmc); 90751c5d844SKevin Hilman if (ret == -EPROBE_DEFER) 90851c5d844SKevin Hilman goto free_host; 90951c5d844SKevin Hilman 91051c5d844SKevin Hilman ret = mmc_of_parse(mmc); 91151c5d844SKevin Hilman if (ret) { 912dc012058SKevin Hilman if (ret != -EPROBE_DEFER) 91351c5d844SKevin Hilman dev_warn(&pdev->dev, "error parsing DT: %d\n", ret); 91451c5d844SKevin Hilman goto free_host; 91551c5d844SKevin Hilman } 91651c5d844SKevin Hilman 91751c5d844SKevin Hilman res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 91851c5d844SKevin Hilman host->regs = devm_ioremap_resource(&pdev->dev, res); 91951c5d844SKevin Hilman if (IS_ERR(host->regs)) { 92051c5d844SKevin Hilman ret = PTR_ERR(host->regs); 92151c5d844SKevin Hilman goto free_host; 92251c5d844SKevin Hilman } 92351c5d844SKevin Hilman 9249a1da4dfSHeiner Kallweit irq = platform_get_irq(pdev, 0); 9259a1da4dfSHeiner Kallweit if (!irq) { 92651c5d844SKevin Hilman dev_err(&pdev->dev, "failed to get interrupt resource.\n"); 92751c5d844SKevin Hilman ret = -EINVAL; 92851c5d844SKevin Hilman goto free_host; 92951c5d844SKevin Hilman } 93051c5d844SKevin Hilman 93151c5d844SKevin Hilman host->core_clk = devm_clk_get(&pdev->dev, "core"); 93251c5d844SKevin Hilman if (IS_ERR(host->core_clk)) { 93351c5d844SKevin Hilman ret = PTR_ERR(host->core_clk); 93451c5d844SKevin Hilman goto free_host; 93551c5d844SKevin Hilman } 93651c5d844SKevin Hilman 
93751c5d844SKevin Hilman ret = clk_prepare_enable(host->core_clk); 93851c5d844SKevin Hilman if (ret) 93951c5d844SKevin Hilman goto free_host; 94051c5d844SKevin Hilman 941c08bcb6cSHeiner Kallweit host->tp.core_phase = CLK_PHASE_180; 942c08bcb6cSHeiner Kallweit host->tp.tx_phase = CLK_PHASE_0; 943c08bcb6cSHeiner Kallweit host->tp.rx_phase = CLK_PHASE_0; 944c08bcb6cSHeiner Kallweit 94551c5d844SKevin Hilman ret = meson_mmc_clk_init(host); 94651c5d844SKevin Hilman if (ret) 947ce473d5bSMichał Zegan goto err_core_clk; 94851c5d844SKevin Hilman 9493c39e2caSJerome Brunet /* set config to sane default */ 9503c39e2caSJerome Brunet meson_mmc_cfg_init(host); 9513c39e2caSJerome Brunet 95251c5d844SKevin Hilman /* Stop execution */ 95351c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_START); 95451c5d844SKevin Hilman 95551c5d844SKevin Hilman /* clear, ack, enable all interrupts */ 95651c5d844SKevin Hilman writel(0, host->regs + SD_EMMC_IRQ_EN); 95751c5d844SKevin Hilman writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS); 95892763b99SHeiner Kallweit writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN); 95951c5d844SKevin Hilman 9609a1da4dfSHeiner Kallweit ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, 9619a1da4dfSHeiner Kallweit meson_mmc_irq_thread, IRQF_SHARED, 962f016c676SHeiner Kallweit NULL, host); 96351c5d844SKevin Hilman if (ret) 964cac3a478SHeiner Kallweit goto err_div_clk; 96551c5d844SKevin Hilman 966e5e4a3ebSHeiner Kallweit mmc->caps |= MMC_CAP_CMD23; 967efe0b669SHeiner Kallweit mmc->max_blk_count = CMD_CFG_LENGTH_MASK; 968efe0b669SHeiner Kallweit mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size; 96979ed05e3SHeiner Kallweit mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc); 97079ed05e3SHeiner Kallweit mmc->max_seg_size = mmc->max_req_size; 971efe0b669SHeiner Kallweit 97251c5d844SKevin Hilman /* data bounce buffer */ 9734136fcb5SHeiner Kallweit host->bounce_buf_size = mmc->max_req_size; 97451c5d844SKevin Hilman host->bounce_buf = 
97551c5d844SKevin Hilman dma_alloc_coherent(host->dev, host->bounce_buf_size, 97651c5d844SKevin Hilman &host->bounce_dma_addr, GFP_KERNEL); 97751c5d844SKevin Hilman if (host->bounce_buf == NULL) { 97851c5d844SKevin Hilman dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); 97951c5d844SKevin Hilman ret = -ENOMEM; 980cac3a478SHeiner Kallweit goto err_div_clk; 98151c5d844SKevin Hilman } 98251c5d844SKevin Hilman 98379ed05e3SHeiner Kallweit host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 98479ed05e3SHeiner Kallweit &host->descs_dma_addr, GFP_KERNEL); 98579ed05e3SHeiner Kallweit if (!host->descs) { 98679ed05e3SHeiner Kallweit dev_err(host->dev, "Allocating descriptor DMA buffer failed\n"); 98779ed05e3SHeiner Kallweit ret = -ENOMEM; 98879ed05e3SHeiner Kallweit goto err_bounce_buf; 98979ed05e3SHeiner Kallweit } 99079ed05e3SHeiner Kallweit 99151c5d844SKevin Hilman mmc->ops = &meson_mmc_ops; 99251c5d844SKevin Hilman mmc_add_host(mmc); 99351c5d844SKevin Hilman 99451c5d844SKevin Hilman return 0; 99551c5d844SKevin Hilman 99679ed05e3SHeiner Kallweit err_bounce_buf: 99779ed05e3SHeiner Kallweit dma_free_coherent(host->dev, host->bounce_buf_size, 99879ed05e3SHeiner Kallweit host->bounce_buf, host->bounce_dma_addr); 999cac3a478SHeiner Kallweit err_div_clk: 100051c5d844SKevin Hilman clk_disable_unprepare(host->cfg_div_clk); 1001ce473d5bSMichał Zegan err_core_clk: 100251c5d844SKevin Hilman clk_disable_unprepare(host->core_clk); 1003ce473d5bSMichał Zegan free_host: 100451c5d844SKevin Hilman mmc_free_host(mmc); 100551c5d844SKevin Hilman return ret; 100651c5d844SKevin Hilman } 100751c5d844SKevin Hilman 100851c5d844SKevin Hilman static int meson_mmc_remove(struct platform_device *pdev) 100951c5d844SKevin Hilman { 101051c5d844SKevin Hilman struct meson_host *host = dev_get_drvdata(&pdev->dev); 101151c5d844SKevin Hilman 1012a01fc2a2SMichał Zegan mmc_remove_host(host->mmc); 1013a01fc2a2SMichał Zegan 101492763b99SHeiner Kallweit /* disable interrupts */ 
101592763b99SHeiner Kallweit writel(0, host->regs + SD_EMMC_IRQ_EN); 101692763b99SHeiner Kallweit 101779ed05e3SHeiner Kallweit dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 101879ed05e3SHeiner Kallweit host->descs, host->descs_dma_addr); 101951c5d844SKevin Hilman dma_free_coherent(host->dev, host->bounce_buf_size, 102051c5d844SKevin Hilman host->bounce_buf, host->bounce_dma_addr); 102151c5d844SKevin Hilman 102251c5d844SKevin Hilman clk_disable_unprepare(host->cfg_div_clk); 102351c5d844SKevin Hilman clk_disable_unprepare(host->core_clk); 102451c5d844SKevin Hilman 102551c5d844SKevin Hilman mmc_free_host(host->mmc); 102651c5d844SKevin Hilman return 0; 102751c5d844SKevin Hilman } 102851c5d844SKevin Hilman 102951c5d844SKevin Hilman static const struct of_device_id meson_mmc_of_match[] = { 103051c5d844SKevin Hilman { .compatible = "amlogic,meson-gx-mmc", }, 103151c5d844SKevin Hilman { .compatible = "amlogic,meson-gxbb-mmc", }, 103251c5d844SKevin Hilman { .compatible = "amlogic,meson-gxl-mmc", }, 103351c5d844SKevin Hilman { .compatible = "amlogic,meson-gxm-mmc", }, 103451c5d844SKevin Hilman {} 103551c5d844SKevin Hilman }; 103651c5d844SKevin Hilman MODULE_DEVICE_TABLE(of, meson_mmc_of_match); 103751c5d844SKevin Hilman 103851c5d844SKevin Hilman static struct platform_driver meson_mmc_driver = { 103951c5d844SKevin Hilman .probe = meson_mmc_probe, 104051c5d844SKevin Hilman .remove = meson_mmc_remove, 104151c5d844SKevin Hilman .driver = { 104251c5d844SKevin Hilman .name = DRIVER_NAME, 104351c5d844SKevin Hilman .of_match_table = of_match_ptr(meson_mmc_of_match), 104451c5d844SKevin Hilman }, 104551c5d844SKevin Hilman }; 104651c5d844SKevin Hilman 104751c5d844SKevin Hilman module_platform_driver(meson_mmc_driver); 104851c5d844SKevin Hilman 104951c5d844SKevin Hilman MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver"); 105051c5d844SKevin Hilman MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>"); 105151c5d844SKevin Hilman MODULE_LICENSE("GPL v2"); 1052