1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 */
6
7 #ifndef _MHI_INT_H
8 #define _MHI_INT_H
9
10 #include "../common.h"
11
12 extern struct bus_type mhi_bus_type;
13
14 /* Host request register */
15 #define MHI_SOC_RESET_REQ_OFFSET 0xb0
16 #define MHI_SOC_RESET_REQ BIT(0)
17
18 #define SOC_HW_VERSION_OFFS 0x224
19 #define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28)
20 #define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16)
21 #define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8)
22 #define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0)
23
/*
 * Context tables shared with the device: host virtual pointers to the
 * context arrays plus the DMA addresses programmed into the device.
 */
struct mhi_ctxt {
	struct mhi_event_ctxt *er_ctxt;		/* event ring context array */
	struct mhi_chan_ctxt *chan_ctxt;	/* channel context array */
	struct mhi_cmd_ctxt *cmd_ctxt;		/* command ring context array */
	dma_addr_t er_ctxt_addr;		/* DMA address of @er_ctxt */
	dma_addr_t chan_ctxt_addr;		/* DMA address of @chan_ctxt */
	dma_addr_t cmd_ctxt_addr;		/* DMA address of @cmd_ctxt */
};
32
/* One BHI vector table entry: a DMA-able segment (address + size) */
struct bhi_vec_entry {
	u64 dma_addr;	/* device-visible address of the segment */
	u64 size;	/* segment size in bytes */
};
37
/*
 * enum mhi_ch_state_type - Channel state change operations
 * (RESET/STOP/START); _MAX bounds mhi_ch_state_type_str[] below.
 */
enum mhi_ch_state_type {
	MHI_CH_STATE_TYPE_RESET,
	MHI_CH_STATE_TYPE_STOP,
	MHI_CH_STATE_TYPE_START,
	MHI_CH_STATE_TYPE_MAX,
};
44
45 extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
46 #define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
47 "INVALID_STATE" : \
48 mhi_ch_state_type_str[(state)])
49
/*
 * A doorbell burst mode is valid only if it is explicitly enabled or
 * disabled. Argument is parenthesized so expressions may be passed safely.
 */
#define MHI_INVALID_BRSTMODE(mode) ((mode) != MHI_DB_BRST_DISABLE && \
				    (mode) != MHI_DB_BRST_ENABLE)
52
53 extern const char * const mhi_ee_str[MHI_EE_MAX];
54 #define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
55 "INVALID_EE" : mhi_ee_str[ee])
56
/*
 * Execution environment classification helpers. Arguments are fully
 * parenthesized so expressions may be passed safely.
 */
#define MHI_IN_PBL(ee) ((ee) == MHI_EE_PBL || (ee) == MHI_EE_PTHRU || \
			(ee) == MHI_EE_EDL)
#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || (ee) == MHI_EE_AMSS)
#define MHI_FW_LOAD_CAPABLE(ee) ((ee) == MHI_EE_PBL || (ee) == MHI_EE_EDL)
#define MHI_IN_MISSION_MODE(ee) ((ee) == MHI_EE_AMSS || (ee) == MHI_EE_WFW || \
				 (ee) == MHI_EE_FP)
63
/*
 * enum dev_st_transition - Device state transition work items, queued via
 * mhi_queue_state_transition() and processed by mhi_pm_st_worker().
 * _MAX bounds dev_state_tran_str[] below.
 */
enum dev_st_transition {
	DEV_ST_TRANSITION_PBL,
	DEV_ST_TRANSITION_READY,
	DEV_ST_TRANSITION_SBL,
	DEV_ST_TRANSITION_MISSION_MODE,
	DEV_ST_TRANSITION_FP,
	DEV_ST_TRANSITION_SYS_ERR,
	DEV_ST_TRANSITION_DISABLE,
	DEV_ST_TRANSITION_MAX,
};
74
75 extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
76 #define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
77 "INVALID_STATE" : dev_state_tran_str[state])
78
79 /* internal power states */
/*
 * enum mhi_pm_state - positions in the internal PM state machine.
 * Each enumerator N has a matching bitmask MHI_PM_* == BIT(N) defined
 * below; mhi_tryset_pm_state() and to_mhi_pm_state_str() work on the
 * bitmask form.
 */
enum mhi_pm_state {
	MHI_PM_STATE_DISABLE,
	MHI_PM_STATE_POR,		/* power on reset */
	MHI_PM_STATE_M0,
	MHI_PM_STATE_M2,
	MHI_PM_STATE_M3_ENTER,
	MHI_PM_STATE_M3,
	MHI_PM_STATE_M3_EXIT,
	MHI_PM_STATE_FW_DL_ERR,		/* firmware download failure */
	MHI_PM_STATE_SYS_ERR_DETECT,
	MHI_PM_STATE_SYS_ERR_PROCESS,
	MHI_PM_STATE_SYS_ERR_FAIL,
	MHI_PM_STATE_SHUTDOWN_PROCESS,
	MHI_PM_STATE_LD_ERR_FATAL_DETECT,	/* link not accessible */
	MHI_PM_STATE_MAX
};
96
/* Bitmask forms of enum mhi_pm_state, used for combined-state checks */
#define MHI_PM_DISABLE			BIT(0)
#define MHI_PM_POR			BIT(1)
#define MHI_PM_M0			BIT(2)
#define MHI_PM_M2			BIT(3)
#define MHI_PM_M3_ENTER			BIT(4)
#define MHI_PM_M3			BIT(5)
#define MHI_PM_M3_EXIT			BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR		BIT(7)
#define MHI_PM_SYS_ERR_DETECT		BIT(8)
#define MHI_PM_SYS_ERR_PROCESS		BIT(9)
#define MHI_PM_SYS_ERR_FAIL		BIT(10)
#define MHI_PM_SHUTDOWN_PROCESS		BIT(11)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT	BIT(12)

/*
 * PM-state predicates. All macro arguments are parenthesized so
 * expressions may be passed safely.
 */
/* true if MMIO register access is permitted in @pm_state */
#define MHI_REG_ACCESS_VALID(pm_state) (((pm_state) & (MHI_PM_POR | MHI_PM_M0 | \
					MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
					MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
					MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
					MHI_PM_FW_DL_ERR)))
/* error states are all masks at or above FW_DL_ERR (single-bit compare) */
#define MHI_PM_IN_ERROR_STATE(pm_state) ((pm_state) >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) ((pm_state) == MHI_PM_LD_ERR_FATAL_DETECT)
/* doorbell access allowed when pm_state intersects the controller's mask */
#define MHI_DB_ACCESS_VALID(mhi_cntrl) ((mhi_cntrl)->pm_state & (mhi_cntrl)->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state) ((pm_state) & (MHI_PM_M0 | \
					   MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state) ((pm_state) & MHI_PM_M2)
#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
#define MHI_EVENT_ACCESS_INVALID(pm_state) ((pm_state) == MHI_PM_DISABLE || \
					    MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) ((pm_state) & \
					   (MHI_PM_M3_ENTER | MHI_PM_M3))
129
#define NR_OF_CMD_RINGS			1	/* only the primary command ring is used */
#define CMD_EL_PER_RING			128	/* command ring depth, in elements */
#define PRIMARY_CMD_RING		0	/* index of the primary command ring */
#define MHI_DEV_WAKE_DB			127	/* doorbell reserved for device wake */
#define MHI_MAX_MTU			0xffff	/* maximum transfer unit */
/* non-zero random u32 bounded by @bmsk */
#define MHI_RANDOM_U32_NONZERO(bmsk)	(get_random_u32_inclusive(1, bmsk))
136
/* Event ring type values for the event ring context (VALID = ring in use) */
enum mhi_er_type {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};
141
/* Doorbell configuration and ring method for a channel or event ring */
struct db_cfg {
	bool reset_req;			/* doorbell reset requested (per name — confirm) */
	bool db_mode;			/* burst-mode doorbell state flag (assumed — confirm) */
	u32 pollcfg;			/* poll configuration value (device-defined) */
	enum mhi_db_brst_mode brstmode;	/* burst mode; see MHI_INVALID_BRSTMODE() */
	dma_addr_t db_val;		/* doorbell value to write */
	/* implementation that performs the doorbell write (burst variants below) */
	void (*process_db)(struct mhi_controller *mhi_cntrl,
			   struct db_cfg *db_cfg, void __iomem *io_addr,
			   dma_addr_t db_val);
};
152
/* One row of the PM state machine table: legal moves out of @from_state */
struct mhi_pm_transitions {
	enum mhi_pm_state from_state;
	u32 to_states;		/* bitmask of permitted destination MHI_PM_* states */
};
157
/* A queued device state transition (see mhi_queue_state_transition()) */
struct state_transition {
	struct list_head node;		/* entry in the pending-transition list */
	enum dev_st_transition state;	/* transition to execute */
};
162
/* Generic MHI ring, used for transfer, event, and command rings */
struct mhi_ring {
	dma_addr_t dma_handle;	/* DMA address of the allocation */
	dma_addr_t iommu_base;	/* ring base in the device's address space (per name) */
	__le64 *ctxt_wp;	/* point to ctxt wp */
	void *pre_aligned;	/* original allocation, before alignment */
	void *base;		/* aligned ring base (host virtual) */
	void *rp;		/* read pointer */
	void *wp;		/* write pointer */
	size_t el_size;		/* size of one ring element, in bytes */
	size_t len;		/* usable ring length, in bytes */
	size_t elements;	/* number of elements in the ring */
	size_t alloc_size;	/* size of the @pre_aligned allocation */
	void __iomem *db_addr;	/* doorbell register address for this ring */
};
177
/* Command ring plus the lock serializing writers to it */
struct mhi_cmd {
	struct mhi_ring ring;
	spinlock_t lock;	/* protects @ring updates */
};
182
/* Describes one data buffer queued on a channel */
struct mhi_buf_info {
	void *v_addr;	/* host virtual address */
	void *bb_addr;	/* bounce buffer address (see mhi_map_single_use_bb()) */
	void *wp;	/* ring position associated with this buffer — confirm */
	void *cb_buf;	/* opaque buffer handed back to the client on completion */
	dma_addr_t p_addr;	/* DMA address */
	size_t len;	/* buffer length, in bytes */
	enum dma_data_direction dir;	/* DMA direction */
	bool used;	/* Indicates whether the buffer is used or not */
	bool pre_mapped;	/* Already pre-mapped by client */
};
194
/* Per-event-ring state: IRQ, ring storage, and processing context */
struct mhi_event {
	struct mhi_controller *mhi_cntrl;	/* owning controller */
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	u32 er_index;		/* event ring index */
	u32 intmod;		/* interrupt moderation value (device-defined units) */
	u32 irq;		/* IRQ vector serving this ring */
	int chan; /* this event ring is dedicated to a channel (optional) */
	u32 priority;		/* processing priority (per name — confirm semantics) */
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;	/* doorbell configuration for this ring */
	struct tasklet_struct task;	/* bottom-half event processing */
	spinlock_t lock;
	/* drains up to @event_quota events; see mhi_process_*_ev_ring() */
	int (*process_event)(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota);
	bool hw_ring;		/* "hardware" ring (per name — confirm) */
	bool cl_manage;		/* client-managed (per name — confirm) */
	bool offload_ev; /* managed by a device driver */
};
215
/* Per-channel state: rings, configuration, and runtime status */
struct mhi_chan {
	const char *name;
	/*
	 * Important: When consuming, increment tre_ring first and when
	 * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
	 * is guaranteed to have space so we do not need to check both rings.
	 */
	struct mhi_ring buf_ring;	/* client buffer bookkeeping ring */
	struct mhi_ring tre_ring;	/* transfer ring shared with the device */
	u32 chan;			/* channel number */
	u32 er_index;			/* index of the event ring serving this channel */
	u32 intmod;			/* interrupt moderation value */
	enum mhi_ch_type type;
	enum dma_data_direction dir;	/* data transfer direction */
	struct db_cfg db_cfg;		/* doorbell configuration */
	enum mhi_ch_ee_mask ee_mask;	/* execution environments the channel is valid in */
	enum mhi_ch_state ch_state;	/* current channel state */
	enum mhi_ev_ccs ccs;		/* last completion code */
	struct mhi_device *mhi_dev;	/* client device bound to this channel */
	/* completion callback invoked with the transfer result */
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct mutex mutex;
	struct completion completion;
	rwlock_t lock;
	struct list_head node;
	bool lpm_notify;		/* low-power-mode notify flag (per name — confirm) */
	bool configured;		/* channel was successfully configured */
	bool offload_ch;		/* channel managed outside the core */
	bool pre_alloc;			/* inbound buffers allocated by the core */
	bool wake_capable;		/* channel may assert device wake (per name) */
};
246
247 /* Default MHI timeout */
248 #define MHI_TIMEOUT_MS (1000)
249
250 /* debugfs related functions */
251 #ifdef CONFIG_MHI_BUS_DEBUG
252 void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
253 void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
254 void mhi_debugfs_init(void);
255 void mhi_debugfs_exit(void);
256 #else
/* No-op stubs used when CONFIG_MHI_BUS_DEBUG is not enabled */
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
272 #endif
273
274 struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
275
276 int mhi_destroy_device(struct device *dev, void *data);
277 void mhi_create_devices(struct mhi_controller *mhi_cntrl);
278
279 int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
280 struct image_info **image_info, size_t alloc_size);
281 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
282 struct image_info *image_info);
283
284 /* Power management APIs */
285 enum mhi_pm_state __must_check mhi_tryset_pm_state(
286 struct mhi_controller *mhi_cntrl,
287 enum mhi_pm_state state);
288 const char *to_mhi_pm_state_str(u32 state);
289 int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
290 enum dev_st_transition state);
291 void mhi_pm_st_worker(struct work_struct *work);
292 void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
293 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
294 int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
295 void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
296 int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
297 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
298 int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
299 enum mhi_cmd_type cmd);
300 int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
mhi_is_active(struct mhi_controller * mhi_cntrl)301 static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
302 {
303 return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
304 mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
305 }
306
/* Wake the host side and nudge runtime PM so the controller resumes */
static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
{
	/* report a wakeup event against the MHI device */
	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	/* get+put cycles the runtime PM refcount: forces a resume, then
	 * immediately allows re-suspend once idle */
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
}
313
314 /* Register access methods */
315 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
316 void __iomem *db_addr, dma_addr_t db_val);
317 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
318 struct db_cfg *db_mode, void __iomem *db_addr,
319 dma_addr_t db_val);
320 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
321 void __iomem *base, u32 offset, u32 *out);
322 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
323 void __iomem *base, u32 offset, u32 mask,
324 u32 *out);
325 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
326 void __iomem *base, u32 offset, u32 mask,
327 u32 val, u32 delayus);
328 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
329 u32 offset, u32 val);
330 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
331 void __iomem *base, u32 offset, u32 mask,
332 u32 val);
333 void mhi_ring_er_db(struct mhi_event *mhi_event);
334 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
335 dma_addr_t db_val);
336 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
337 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
338 struct mhi_chan *mhi_chan);
339
340 /* Initialization methods */
341 int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
342 int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
343 void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
344 int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
345 void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
346 int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
347 struct image_info *img_info);
348 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
349
350 /* Automatically allocate and queue inbound buffers */
351 #define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
352 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
353 struct mhi_chan *mhi_chan, unsigned int flags);
354
355 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
356 struct mhi_chan *mhi_chan);
357 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
358 struct mhi_chan *mhi_chan);
359 void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
360 struct mhi_chan *mhi_chan);
361
362 /* Event processing methods */
363 void mhi_ctrl_ev_task(unsigned long data);
364 void mhi_ev_task(unsigned long data);
365 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
366 struct mhi_event *mhi_event, u32 event_quota);
367 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
368 struct mhi_event *mhi_event, u32 event_quota);
369
370 /* ISR handlers */
371 irqreturn_t mhi_irq_handler(int irq_number, void *dev);
372 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
373 irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
374
375 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
376 struct mhi_buf_info *info, enum mhi_flags flags);
377 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
378 struct mhi_buf_info *buf_info);
379 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
380 struct mhi_buf_info *buf_info);
381 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
382 struct mhi_buf_info *buf_info);
383 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
384 struct mhi_buf_info *buf_info);
385
386 #endif /* _MHI_INT_H */
387