1ba3869ffSJan Glauber /*
2ba3869ffSJan Glauber * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
3ba3869ffSJan Glauber * ThunderX SOCs.
4ba3869ffSJan Glauber *
5ba3869ffSJan Glauber * This file is subject to the terms and conditions of the GNU General Public
6ba3869ffSJan Glauber * License. See the file "COPYING" in the main directory of this archive
7ba3869ffSJan Glauber * for more details.
8ba3869ffSJan Glauber *
9ba3869ffSJan Glauber * Copyright (C) 2012-2017 Cavium Inc.
10ba3869ffSJan Glauber * Authors:
11ba3869ffSJan Glauber * David Daney <david.daney@cavium.com>
12ba3869ffSJan Glauber * Peter Swain <pswain@cavium.com>
13ba3869ffSJan Glauber * Steven J. Hill <steven.hill@cavium.com>
14ba3869ffSJan Glauber * Jan Glauber <jglauber@cavium.com>
15ba3869ffSJan Glauber */
16ba3869ffSJan Glauber #include <linux/bitfield.h>
17ba3869ffSJan Glauber #include <linux/delay.h>
18ba3869ffSJan Glauber #include <linux/dma-direction.h>
19ba3869ffSJan Glauber #include <linux/dma-mapping.h>
20ba3869ffSJan Glauber #include <linux/gpio/consumer.h>
21ba3869ffSJan Glauber #include <linux/interrupt.h>
22ba3869ffSJan Glauber #include <linux/mmc/mmc.h>
23ba3869ffSJan Glauber #include <linux/mmc/slot-gpio.h>
24ba3869ffSJan Glauber #include <linux/module.h>
25ba3869ffSJan Glauber #include <linux/regulator/consumer.h>
26ba3869ffSJan Glauber #include <linux/scatterlist.h>
27ba3869ffSJan Glauber #include <linux/time.h>
28ba3869ffSJan Glauber
29ba3869ffSJan Glauber #include "cavium.h"
30ba3869ffSJan Glauber
/* Human-readable names for the controller's interrupt lines, in IRQ order. */
const char *cvm_mmc_irq_names[] = {
	"MMC Buffer",
	"MMC Command",
	"MMC DMA",
	"MMC Command Error",
	"MMC DMA Error",
	"MMC Switch",
	"MMC Switch Error",
	"MMC DMA int Fifo",
	"MMC DMA int",
};
42ba3869ffSJan Glauber
43ba3869ffSJan Glauber /*
44ba3869ffSJan Glauber * The Cavium MMC host hardware assumes that all commands have fixed
45ba3869ffSJan Glauber * command and response types. These are correct if MMC devices are
46ba3869ffSJan Glauber * being used. However, non-MMC devices like SD use command and
47ba3869ffSJan Glauber * response types that are unexpected by the host hardware.
48ba3869ffSJan Glauber *
49ba3869ffSJan Glauber * The command and response types can be overridden by supplying an
50ba3869ffSJan Glauber * XOR value that is applied to the type. We calculate the XOR value
51ba3869ffSJan Glauber * from the values in this table and the flags passed from the MMC
52ba3869ffSJan Glauber * core.
53ba3869ffSJan Glauber */
/* Hardware-assumed {ctype, rtype} for each opcode, indexed by CMD number. */
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
	{0, 0},		/* CMD0 */
	{0, 3},		/* CMD1 */
	{0, 2},		/* CMD2 */
	{0, 1},		/* CMD3 */
	{0, 0},		/* CMD4 */
	{0, 1},		/* CMD5 */
	{0, 1},		/* CMD6 */
	{0, 1},		/* CMD7 */
	{1, 1},		/* CMD8 */
	{0, 2},		/* CMD9 */
	{0, 2},		/* CMD10 */
	{1, 1},		/* CMD11 */
	{0, 1},		/* CMD12 */
	{0, 1},		/* CMD13 */
	{1, 1},		/* CMD14 */
	{0, 0},		/* CMD15 */
	{0, 1},		/* CMD16 */
	{1, 1},		/* CMD17 */
	{1, 1},		/* CMD18 */
	{3, 1},		/* CMD19 */
	{2, 1},		/* CMD20 */
	{0, 0},		/* CMD21 */
	{0, 0},		/* CMD22 */
	{0, 1},		/* CMD23 */
	{2, 1},		/* CMD24 */
	{2, 1},		/* CMD25 */
	{2, 1},		/* CMD26 */
	{2, 1},		/* CMD27 */
	{0, 1},		/* CMD28 */
	{0, 1},		/* CMD29 */
	{1, 1},		/* CMD30 */
	{1, 1},		/* CMD31 */
	{0, 0},		/* CMD32 */
	{0, 0},		/* CMD33 */
	{0, 0},		/* CMD34 */
	{0, 1},		/* CMD35 */
	{0, 1},		/* CMD36 */
	{0, 0},		/* CMD37 */
	{0, 1},		/* CMD38 */
	{0, 4},		/* CMD39 */
	{0, 5},		/* CMD40 */
	{0, 0},		/* CMD41 */
	{2, 1},		/* CMD42 */
	{0, 0},		/* CMD43 */
	{0, 0},		/* CMD44 */
	{0, 0},		/* CMD45 */
	{0, 0},		/* CMD46 */
	{0, 0},		/* CMD47 */
	{0, 0},		/* CMD48 */
	{0, 0},		/* CMD49 */
	{0, 0},		/* CMD50 */
	{0, 0},		/* CMD51 */
	{0, 0},		/* CMD52 */
	{0, 0},		/* CMD53 */
	{0, 0},		/* CMD54 */
	{0, 1},		/* CMD55 */
	{0xff, 0xff},	/* CMD56 */
	{0, 0},		/* CMD57 */
	{0, 0},		/* CMD58 */
	{0, 0},		/* CMD59 */
	{0, 0},		/* CMD60 */
	{0, 0},		/* CMD61 */
	{0, 0},		/* CMD62 */
	{0, 0}		/* CMD63 */
};
120ba3869ffSJan Glauber
cvm_mmc_get_cr_mods(struct mmc_command * cmd)121ba3869ffSJan Glauber static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
122ba3869ffSJan Glauber {
123ba3869ffSJan Glauber struct cvm_mmc_cr_type *cr;
124ba3869ffSJan Glauber u8 hardware_ctype, hardware_rtype;
125ba3869ffSJan Glauber u8 desired_ctype = 0, desired_rtype = 0;
126ba3869ffSJan Glauber struct cvm_mmc_cr_mods r;
127ba3869ffSJan Glauber
128ba3869ffSJan Glauber cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
129ba3869ffSJan Glauber hardware_ctype = cr->ctype;
130ba3869ffSJan Glauber hardware_rtype = cr->rtype;
131ba3869ffSJan Glauber if (cmd->opcode == MMC_GEN_CMD)
132ba3869ffSJan Glauber hardware_ctype = (cmd->arg & 1) ? 1 : 2;
133ba3869ffSJan Glauber
134ba3869ffSJan Glauber switch (mmc_cmd_type(cmd)) {
135ba3869ffSJan Glauber case MMC_CMD_ADTC:
136ba3869ffSJan Glauber desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
137ba3869ffSJan Glauber break;
138ba3869ffSJan Glauber case MMC_CMD_AC:
139ba3869ffSJan Glauber case MMC_CMD_BC:
140ba3869ffSJan Glauber case MMC_CMD_BCR:
141ba3869ffSJan Glauber desired_ctype = 0;
142ba3869ffSJan Glauber break;
143ba3869ffSJan Glauber }
144ba3869ffSJan Glauber
145ba3869ffSJan Glauber switch (mmc_resp_type(cmd)) {
146ba3869ffSJan Glauber case MMC_RSP_NONE:
147ba3869ffSJan Glauber desired_rtype = 0;
148ba3869ffSJan Glauber break;
149ba3869ffSJan Glauber case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
150ba3869ffSJan Glauber case MMC_RSP_R1B:
151ba3869ffSJan Glauber desired_rtype = 1;
152ba3869ffSJan Glauber break;
153ba3869ffSJan Glauber case MMC_RSP_R2:
154ba3869ffSJan Glauber desired_rtype = 2;
155ba3869ffSJan Glauber break;
156ba3869ffSJan Glauber case MMC_RSP_R3: /* MMC_RSP_R4 */
157ba3869ffSJan Glauber desired_rtype = 3;
158ba3869ffSJan Glauber break;
159ba3869ffSJan Glauber }
160ba3869ffSJan Glauber r.ctype_xor = desired_ctype ^ hardware_ctype;
161ba3869ffSJan Glauber r.rtype_xor = desired_rtype ^ hardware_rtype;
162ba3869ffSJan Glauber return r;
163ba3869ffSJan Glauber }
164ba3869ffSJan Glauber
check_switch_errors(struct cvm_mmc_host * host)165ba3869ffSJan Glauber static void check_switch_errors(struct cvm_mmc_host *host)
166ba3869ffSJan Glauber {
167ba3869ffSJan Glauber u64 emm_switch;
168ba3869ffSJan Glauber
169ba3869ffSJan Glauber emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
170ba3869ffSJan Glauber if (emm_switch & MIO_EMM_SWITCH_ERR0)
171ba3869ffSJan Glauber dev_err(host->dev, "Switch power class error\n");
172ba3869ffSJan Glauber if (emm_switch & MIO_EMM_SWITCH_ERR1)
173ba3869ffSJan Glauber dev_err(host->dev, "Switch hs timing error\n");
174ba3869ffSJan Glauber if (emm_switch & MIO_EMM_SWITCH_ERR2)
175ba3869ffSJan Glauber dev_err(host->dev, "Switch bus width error\n");
176ba3869ffSJan Glauber }
177ba3869ffSJan Glauber
/* Clear the BUS_ID field (bits 61:60) in a register value. */
static void clear_bus_id(u64 *reg)
{
	*reg &= ~GENMASK_ULL(61, 60);
}
184ba3869ffSJan Glauber
/*
 * Set the BUS_ID field (bits 61:60) in a register value.
 *
 * Use GENMASK_ULL, not GENMASK: the mask covers bits above 31, and the
 * plain unsigned-long variant would truncate it on 32-bit builds. This
 * also matches clear_bus_id() and get_bus_id(), which already use the
 * 64-bit variant.
 */
static void set_bus_id(u64 *reg, int bus_id)
{
	clear_bus_id(reg);
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}
190ba3869ffSJan Glauber
/* Extract the BUS_ID field (bits 61:60) from a register value. */
static int get_bus_id(u64 reg)
{
	u64 bus_id = FIELD_GET(GENMASK_ULL(61, 60), reg);

	return bus_id;
}
195ba3869ffSJan Glauber
196ba3869ffSJan Glauber /*
197ba3869ffSJan Glauber * We never set the switch_exe bit since that would interfere
198ba3869ffSJan Glauber * with the commands send by the MMC core.
199ba3869ffSJan Glauber */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	u64 rsp_sts;
	int bus_id, tries;

	/*
	 * Modes setting only taken from slot 0. Work around that hardware
	 * issue by first switching to slot 0.
	 */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* Now re-issue the switch targeting the real slot. */
	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* Poll until SWITCH_VAL clears, up to 100 * 10us. */
	for (tries = 100; tries; tries--) {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	}

	check_switch_errors(host);
}
227ba3869ffSJan Glauber
switch_val_changed(struct cvm_mmc_slot * slot,u64 new_val)228ba3869ffSJan Glauber static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
229ba3869ffSJan Glauber {
230ba3869ffSJan Glauber /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
231ba3869ffSJan Glauber u64 match = 0x3001070fffffffffull;
232ba3869ffSJan Glauber
233ba3869ffSJan Glauber return (slot->cached_switch & match) != (new_val & match);
234ba3869ffSJan Glauber }
235ba3869ffSJan Glauber
set_wdog(struct cvm_mmc_slot * slot,unsigned int ns)236ba3869ffSJan Glauber static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
237ba3869ffSJan Glauber {
238ba3869ffSJan Glauber u64 timeout;
239ba3869ffSJan Glauber
240ba3869ffSJan Glauber if (!slot->clock)
241ba3869ffSJan Glauber return;
242ba3869ffSJan Glauber
243ba3869ffSJan Glauber if (ns)
244ba3869ffSJan Glauber timeout = (slot->clock * ns) / NSEC_PER_SEC;
245ba3869ffSJan Glauber else
246ba3869ffSJan Glauber timeout = (slot->clock * 850ull) / 1000ull;
247ba3869ffSJan Glauber writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
248ba3869ffSJan Glauber }
249ba3869ffSJan Glauber
/*
 * Reset the bus for this slot: re-issue the current switch settings with
 * the execute and error bits cleared, then restore the saved watchdog.
 */
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	/* Take the current mode, drop EXE/ERR bits, retarget this slot. */
	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
	set_bus_id(&emm_switch, slot->bus_id);

	/* Save the watchdog so it can be restored after the switch. */
	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;

	/* NOTE(review): settle delay after the switch — exact need unclear. */
	msleep(20);

	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}
269ba3869ffSJan Glauber
270ba3869ffSJan Glauber /* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	/* Already the active slot — nothing to do. */
	if (slot->bus_id == host->last_slot)
		return;

	/* Save the outgoing slot's SWITCH and RCA registers. */
	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	/* Load the incoming slot's cached state into the hardware. */
	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	/* Program the per-slot command/data sample point counts. */
	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}
297ba3869ffSJan Glauber
/*
 * PIO read: copy received data out of the controller's internal buffer
 * (selected by @dbuf) into the request's scatterlist, one 64-bit word
 * at a time, unpacked most-significant byte first.
 */
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;	/* shift < 0 means "word exhausted" */
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		/* Current scatterlist segment exhausted — advance. */
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		/* Refill: read the next 64-bit word from the buffer. */
		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		/* Unpack bytes from the word into the segment, MSB first. */
		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}
333ba3869ffSJan Glauber
do_write(struct mmc_request * req)334ba3869ffSJan Glauber static void do_write(struct mmc_request *req)
335ba3869ffSJan Glauber {
336ba3869ffSJan Glauber req->data->bytes_xfered = req->data->blocks * req->data->blksz;
337ba3869ffSJan Glauber req->data->error = 0;
338ba3869ffSJan Glauber }
339ba3869ffSJan Glauber
/*
 * Copy the command response out of the RSP_LO/RSP_HI registers into
 * req->cmd->resp[], according to the response type reported in @rsp_sts.
 */
static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	/* No valid response latched — leave resp[] untouched. */
	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		/* 48-bit response: 32 payload bits above the CRC byte. */
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		/* 136-bit response (CID/CSD): spans both registers. */
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}
367ba3869ffSJan Glauber
get_dma_dir(struct mmc_data * data)368ba3869ffSJan Glauber static int get_dma_dir(struct mmc_data *data)
369ba3869ffSJan Glauber {
370ba3869ffSJan Glauber return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
371ba3869ffSJan Glauber }
372ba3869ffSJan Glauber
finish_dma_single(struct cvm_mmc_host * host,struct mmc_data * data)373ba3869ffSJan Glauber static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
374ba3869ffSJan Glauber {
375ba3869ffSJan Glauber data->bytes_xfered = data->blocks * data->blksz;
376ba3869ffSJan Glauber data->error = 0;
377b803974aSKevin Hao dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
378ba3869ffSJan Glauber return 1;
379ba3869ffSJan Glauber }
380ba3869ffSJan Glauber
/* Tear down a scatter-gather DMA transfer; always reports success (1). */
static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}
400cd76e5c5SJan Glauber
finish_dma(struct cvm_mmc_host * host,struct mmc_data * data)401ba3869ffSJan Glauber static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
402ba3869ffSJan Glauber {
403cd76e5c5SJan Glauber if (host->use_sg && data->sg_len > 1)
404cd76e5c5SJan Glauber return finish_dma_sg(host, data);
405cd76e5c5SJan Glauber else
406ba3869ffSJan Glauber return finish_dma_single(host, data);
407ba3869ffSJan Glauber }
408ba3869ffSJan Glauber
/* Translate RSP_STS error bits into a Linux error code (0 on success). */
static int check_status(u64 rsp_sts)
{
	if (rsp_sts & (MIO_EMM_RSP_STS_RSP_BAD_STS |
		       MIO_EMM_RSP_STS_RSP_CRC_ERR |
		       MIO_EMM_RSP_STS_BLK_CRC_ERR))
		return -EILSEQ;
	if (rsp_sts & (MIO_EMM_RSP_STS_RSP_TIMEOUT |
		       MIO_EMM_RSP_STS_BLK_TIMEOUT))
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}
422ba3869ffSJan Glauber
423ba3869ffSJan Glauber /* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

	/* Re-issue the DMA with DAT_NULL set to terminate the transfer. */
	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	/* Target the bus that the failing status was reported for. */
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}
434ba3869ffSJan Glauber
/*
 * Shared interrupt handler: acknowledges all pending EMM interrupt bits,
 * finishes PIO transfers on BUF_DONE, completes the current request on
 * CMD/DMA done or error, and cleans up after failed DMA.
 */
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	u64 emm_int, rsp_sts;
	bool host_done;

	/* __acquire/__release keep sparse happy on the lock-free path. */
	if (host->need_irq_handler_lock)
		spin_lock(&host->irq_handler_lock);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears ). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	/* PIO path: buffer ready — drain (read) or account (write). */
	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	/* Any of these bits means the command/DMA has finished (or failed). */
	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	/* A DMA error with a transfer still pending needs manual cleanup. */
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock(&host->irq_handler_lock);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}
511ba3869ffSJan Glauber
512ba3869ffSJan Glauber /*
513ba3869ffSJan Glauber * Program DMA_CFG and if needed DMA_ADR.
514ba3869ffSJan Glauber * Returns 0 on error, DMA address otherwise.
515ba3869ffSJan Glauber */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	/* Map the (single-segment) scatterlist for DMA. */
	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
	/* SIZE field is in 64-bit words, minus one. */
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	/* Small-address models carry the address inside DMA_CFG itself. */
	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	/* Models with a wide address use the separate DMA_ADR register. */
	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}
547ba3869ffSJan Glauber
548cd76e5c5SJan Glauber /*
549cd76e5c5SJan Glauber * Queue complete sg list into the FIFO.
550cd76e5c5SJan Glauber * Returns 0 on error, 1 otherwise.
551cd76e5c5SJan Glauber */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
	/* The hardware FIFO holds at most 16 entries. */
	if (count > 16)
		goto error;

	/* Enable FIFO by removing CLR bit */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		/* Program DMA address */
		addr = sg_dma_address(sg);
		/* Addresses must be 8-byte aligned. */
		if (addr & 7)
			goto error;
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		/*
		 * If we have scatter-gather support we also have an extra
		 * register for the DMA addr, so no need to check
		 * host->big_dma_addr here.
		 */
		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

		/* enable interrupts on the last element */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
				       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
		/* SIZE field is in 64-bit words, minus one. */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/*
		 * The write copies the address and the command to the FIFO
		 * and increments the FIFO's COUNT field.
		 */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
		pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n",
			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
	}

	/*
	 * In difference to prepare_dma_single we don't return the
	 * address here, as it would not make sense for scatter-gather.
	 * The dma fixup is only required on models that don't support
	 * scatter-gather, so that is not a problem.
	 */
	return 1;

error:
	WARN_ON_ONCE(1);
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	/* Disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}
616cd76e5c5SJan Glauber
prepare_dma(struct cvm_mmc_host * host,struct mmc_data * data)617ba3869ffSJan Glauber static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
618ba3869ffSJan Glauber {
619cd76e5c5SJan Glauber if (host->use_sg && data->sg_len > 1)
620cd76e5c5SJan Glauber return prepare_dma_sg(host, data);
621cd76e5c5SJan Glauber else
622ba3869ffSJan Glauber return prepare_dma_single(host, data);
623ba3869ffSJan Glauber }
624ba3869ffSJan Glauber
/* Build the MIO_EMM_DMA register value describing the external transfer. */
static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	/* SECTOR selects sector vs byte card addressing. */
	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);

	/* Multi-block only for eMMC, or SD cards that support CMD23. */
	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u multi: %d\n",
		 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}
648ba3869ffSJan Glauber
/*
 * Issue a multi-block read/write using the controller's internal DMA
 * engine. Called from cvm_mmc_request() with the bus already acquired;
 * on success the bus is released by the completion interrupt handler,
 * on failure it is released on the error path below.
 */
static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	/* DMA requests must carry data plus a STOP_TRANSMISSION command. */
	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev, "Error: %s no data\n", __func__);
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	/* Only one request may be in flight on the shared bus. */
	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);	/* 0 means the DMA mapping failed */
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	/* Model-specific DMA address fixup hook (only set where needed). */
	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power reset value.
	 */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	/* Writing MIO_EMM_DMA starts the transfer. */
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}
706ba3869ffSJan Glauber
do_read_request(struct cvm_mmc_host * host,struct mmc_request * mrq)707ba3869ffSJan Glauber static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
708ba3869ffSJan Glauber {
709ba3869ffSJan Glauber sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
710ba3869ffSJan Glauber SG_MITER_ATOMIC | SG_MITER_TO_SG);
711ba3869ffSJan Glauber }
712ba3869ffSJan Glauber
/*
 * Copy the request's write data into the controller's transmit buffer
 * before the command is issued. Bytes from the scatterlist are packed
 * MSB-first into 64-bit words and written to the buffer data register.
 *
 * NOTE(review): a trailing partial word (shift still >= 0 when the data
 * runs out) is not flushed — this assumes data_len is a multiple of 8,
 * which holds for the usual 512-byte block size; confirm for odd sizes.
 */
static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;		/* next byte lands in the word's MSB */
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command. */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto inc from offset zero, dbuf zero */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		/* Advance to the next scatterlist segment when exhausted. */
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		/* Pack up to eight bytes, MSB first, into one 64-bit word. */
		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		/* Word is full: write it to the FIFO and start a new one. */
		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}
749ba3869ffSJan Glauber
/*
 * .request handler for the MMC core. Multi-block reads/writes are
 * handed off to the DMA path; everything else is issued through the
 * command register, with data (if any) staged via the internal buffer.
 * The bus lock taken here is released from the completion interrupt.
 */
static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only a
	 * single user of the bootbus/MMC bus at a time. The lock is acquired
	 * on all entry points from the MMC layer.
	 *
	 * For requests the lock is only released after the completion
	 * interrupt!
	 */
	host->acquire_bus(host);

	/* Multi-block transfers go through the controller's DMA engine. */
	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	/* ctype/rtype XOR overrides for commands the HW guesses wrong. */
	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else
		set_wdog(slot, 0);

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
	/* For data commands, point the HW at the staged data in the buffer. */
	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

	writeq(0, host->base + MIO_EMM_STS_MASK(host));

	/* Busy-wait (up to 100 * 10us) for any in-flight operation to clear. */
retry:
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
	/* Writing MIO_EMM_CMD issues the command. */
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}
821ba3869ffSJan Glauber
/*
 * .set_ios handler: apply power state, bus width, timing and clock
 * changes for this slot. The new settings are written via the switch
 * register only when they actually differ from the cached value.
 */
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		/* Power via shared GPIO or, failing that, the vmmc regulator. */
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert bus width to HW definition */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		bus_width = 2;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = 1;
		break;
	case MMC_BUS_WIDTH_1:
		bus_width = 0;
		break;
	}

	/* DDR is available for 4/8 bit bus width */
	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
		bus_width |= 4;

	/* Change the clock frequency. Controller maximum is 52 MHz. */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

	/* Half-period in sys_freq cycles, rounded up (50% duty cycle). */
	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);

	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	/* Skip the (slow) switch operation if nothing changed. */
	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}
896ba3869ffSJan Glauber
/* MMC core callbacks; card-detect and write-protect come from GPIOs. */
static const struct mmc_host_ops cvm_mmc_ops = {
	.request = cvm_mmc_request,
	.set_ios = cvm_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
};
903ba3869ffSJan Glauber
cvm_mmc_set_clock(struct cvm_mmc_slot * slot,unsigned int clock)904ba3869ffSJan Glauber static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
905ba3869ffSJan Glauber {
906ba3869ffSJan Glauber struct mmc_host *mmc = slot->mmc;
907ba3869ffSJan Glauber
908ba3869ffSJan Glauber clock = min(clock, mmc->f_max);
909ba3869ffSJan Glauber clock = max(clock, mmc->f_min);
910ba3869ffSJan Glauber slot->clock = clock;
911ba3869ffSJan Glauber }
912ba3869ffSJan Glauber
/*
 * One-time hardware setup for a slot: enable it in the controller,
 * program the initial (minimum) clock, and set default watchdog,
 * status mask and RCA values. Always returns 0.
 */
static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	/* Equal high/low half-periods give a 50% duty cycle. */
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);

	/* Make the changes take effect on this bus slot. */
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set watchdog timeout value and default reset value
	 * for the mask register. Finally, set the CARD_RCA
	 * bit so that we can get the card address relative
	 * to the CMD register for CMD7 transactions.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}
948ba3869ffSJan Glauber
/*
 * Parse the device-tree node for one slot: slot id ("reg"), regulator,
 * bus width, frequency limits and clock-skew sampling settings.
 * Returns the slot id (>= 0) on success or a negative errno.
 */
static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
		return ret;
	}

	/* Reject out-of-range ids and ids already claimed by another slot. */
	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %pOF\n", node);
		return -EINVAL;
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;
	/*
	 * Legacy Octeon firmware has no regulator entry, fall-back to
	 * a hard-coded voltage to get a sane OCR.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set bus width (vendor property, used when common caps are absent) */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/*
	 * Set maximum and minimum frequency. NOTE(review): reading
	 * "spi-max-frequency" looks odd for an MMC node — presumably a
	 * legacy Octeon firmware binding; confirm before changing.
	 */
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
	mmc->f_min = 400000;

	/* Sampling register settings, period in picoseconds */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
	/* Convert skew (ps) to sys-clock cycle counts, rounded to nearest. */
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}
1008ba3869ffSJan Glauber
/*
 * Probe one MMC slot: allocate the mmc_host, parse its DT properties,
 * configure host capabilities and limits, initialize the hardware and
 * register with the MMC core. Returns 0 on success, negative errno on
 * failure (the allocated host is freed on the error path).
 */
int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	ret = cvm_mmc_of_parse(dev, slot);
	if (ret < 0)
		goto error;
	id = ret;	/* cvm_mmc_of_parse() returns the slot id */

	/* Set up host parameters */
	mmc->ops = &cvm_mmc_ops;

	/*
	 * We only have a 3.3v supply, we cannot support any
	 * of the UHS modes. We do support the high speed DDR
	 * modes up to 52MHz.
	 *
	 * Disable bounce buffers for max_segs = 1
	 */
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

	if (host->use_sg)
		mmc->max_segs = 16;
	else
		mmc->max_segs = 1;

	/* DMA size field can address up to 8 MB */
	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
				  dma_get_max_seg_size(host->dev));
	mmc->max_req_size = mmc->max_seg_size;
	/* External DMA is in 512 byte blocks */
	mmc->max_blk_size = 512;
	/* DMA block count field is 15 bits */
	mmc->max_blk_count = 32767;

	slot->clock = mmc->f_min;
	slot->bus_id = id;
	slot->cached_rca = 1;

	/* Bring up the hardware for this slot under the shared-bus lock. */
	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(dev, "mmc_add_host() returned %d\n", ret);
		slot->host->slot[id] = NULL;
		goto error;
	}
	return 0;

error:
	mmc_free_host(slot->mmc);
	return ret;
}
1077ba3869ffSJan Glauber
cvm_mmc_of_slot_remove(struct cvm_mmc_slot * slot)1078ba3869ffSJan Glauber int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
1079ba3869ffSJan Glauber {
1080ba3869ffSJan Glauber mmc_remove_host(slot->mmc);
1081ba3869ffSJan Glauber slot->host->slot[slot->bus_id] = NULL;
1082ba3869ffSJan Glauber mmc_free_host(slot->mmc);
1083ba3869ffSJan Glauber return 0;
1084ba3869ffSJan Glauber }
1085