xref: /openbmc/linux/drivers/bus/mhi/host/pm.c (revision 1e952e95843d437b8a904dbd5b48d72db8ac23ec)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4   *
5   */
6  
7  #include <linux/delay.h>
8  #include <linux/device.h>
9  #include <linux/dma-direction.h>
10  #include <linux/dma-mapping.h>
11  #include <linux/interrupt.h>
12  #include <linux/list.h>
13  #include <linux/mhi.h>
14  #include <linux/module.h>
15  #include <linux/slab.h>
16  #include <linux/wait.h>
17  #include "internal.h"
18  
19  /*
20   * Not all MHI state transitions are synchronous. Transitions like Linkdown,
21   * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
22   * transition to a new state only if we're allowed to.
23   *
24   * Priority increases as we go down. For instance, from any state in L0, the
25   * transition can be made to states in L1, L2 and L3. A notable exception to
26   * this rule is state DISABLE.  From DISABLE state we can only transition to
27   * POR state. Also, while in L2 state, user cannot jump back to previous
28   * L1 or L0 states.
29   *
30   * Valid transitions:
31   * L0: DISABLE <--> POR
32   *     POR <--> POR
33   *     POR -> M0 -> M2 --> M0
34   *     POR -> FW_DL_ERR
35   *     FW_DL_ERR <--> FW_DL_ERR
36   *     M0 <--> M0
37   *     M0 -> FW_DL_ERR
38   *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
39   * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
40   *     SYS_ERR_PROCESS -> SYS_ERR_FAIL
41   *     SYS_ERR_FAIL -> SYS_ERR_DETECT
42   *     SYS_ERR_PROCESS --> POR
43   * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
44   *     SHUTDOWN_PROCESS -> DISABLE
45   * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
46   *     LD_ERR_FATAL_DETECT -> DISABLE
47   */
48  static const struct mhi_pm_transitions dev_state_transitions[] = {
49  	/* L0 States */
50  	{
51  		MHI_PM_DISABLE,
52  		MHI_PM_POR
53  	},
54  	{
55  		MHI_PM_POR,
56  		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
57  		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
58  		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
59  	},
60  	{
61  		MHI_PM_M0,
62  		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
63  		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
64  		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
65  	},
66  	{
67  		MHI_PM_M2,
68  		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
69  		MHI_PM_LD_ERR_FATAL_DETECT
70  	},
71  	{
72  		MHI_PM_M3_ENTER,
73  		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
74  		MHI_PM_LD_ERR_FATAL_DETECT
75  	},
76  	{
77  		MHI_PM_M3,
78  		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
79  		MHI_PM_LD_ERR_FATAL_DETECT
80  	},
81  	{
82  		MHI_PM_M3_EXIT,
83  		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
84  		MHI_PM_LD_ERR_FATAL_DETECT
85  	},
86  	{
87  		MHI_PM_FW_DL_ERR,
88  		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
89  		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
90  	},
91  	/* L1 States */
92  	{
93  		MHI_PM_SYS_ERR_DETECT,
94  		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
95  		MHI_PM_LD_ERR_FATAL_DETECT
96  	},
97  	{
98  		MHI_PM_SYS_ERR_PROCESS,
99  		MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
100  		MHI_PM_LD_ERR_FATAL_DETECT
101  	},
102  	{
103  		MHI_PM_SYS_ERR_FAIL,
104  		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
105  		MHI_PM_LD_ERR_FATAL_DETECT
106  	},
107  	/* L2 States */
108  	{
109  		MHI_PM_SHUTDOWN_PROCESS,
110  		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
111  	},
112  	/* L3 States */
113  	{
114  		MHI_PM_LD_ERR_FATAL_DETECT,
115  		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
116  	},
117  };
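/*
 * Note added for clarity: the lookup in mhi_tryset_pm_state() below assumes
 * each MHI_PM_* value is a single bit and that the entries in this table are
 * ordered by bit position, so find_last_bit() on the current state yields the
 * matching table index. A minimal sketch of that check, under this assumption:
 *
 *	unsigned long cur = mhi_cntrl->pm_state;
 *	int idx = find_last_bit(&cur, 32);
 *
 *	allowed = (dev_state_transitions[idx].from_state == cur) &&
 *		  (dev_state_transitions[idx].to_states & new_state);
 */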
118  
119  enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
120  						   enum mhi_pm_state state)
121  {
122  	unsigned long cur_state = mhi_cntrl->pm_state;
123  	int index = find_last_bit(&cur_state, 32);
124  
125  	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
126  		return cur_state;
127  
128  	if (unlikely(dev_state_transitions[index].from_state != cur_state))
129  		return cur_state;
130  
131  	if (unlikely(!(dev_state_transitions[index].to_states & state)))
132  		return cur_state;
133  
134  	mhi_cntrl->pm_state = state;
135  	return mhi_cntrl->pm_state;
136  }
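/*
 * Illustrative caller pattern (sketch only, mirroring the callers later in
 * this file): the transition is attempted while holding pm_lock as a writer,
 * and the returned state must be checked since the request may be rejected:
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *	if (cur_state != MHI_PM_M0)
 *		return -EIO;
 */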
137  
138  void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
139  {
140  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
141  	int ret;
142  
143  	if (state == MHI_STATE_RESET) {
144  		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
145  					  MHICTRL_RESET_MASK, 1);
146  	} else {
147  		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
148  					  MHICTRL_MHISTATE_MASK, state);
149  	}
150  
151  	if (ret)
152  		dev_err(dev, "Failed to set MHI state to: %s\n",
153  			mhi_state_str(state));
154  }
155  
156  /* NOP for backward compatibility, host allowed to ring DB in M2 state */
157  static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
158  {
159  }
160  
161  static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
162  {
163  	mhi_cntrl->wake_get(mhi_cntrl, false);
164  	mhi_cntrl->wake_put(mhi_cntrl, true);
165  }
166  
167  /* Handle device ready state transition */
168  int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
169  {
170  	struct mhi_event *mhi_event;
171  	enum mhi_pm_state cur_state;
172  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
173  	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
174  	int ret, i;
175  
176  	/* Check if device entered error state */
177  	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
178  		dev_err(dev, "Device link is not accessible\n");
179  		return -EIO;
180  	}
181  
182  	/* Wait for RESET to be cleared and READY bit to be set by the device */
183  	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
184  				 MHICTRL_RESET_MASK, 0, interval_us);
185  	if (ret) {
186  		dev_err(dev, "Device failed to clear MHI Reset\n");
187  		return ret;
188  	}
189  
190  	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
191  				 MHISTATUS_READY_MASK, 1, interval_us);
192  	if (ret) {
193  		dev_err(dev, "Device failed to enter MHI Ready\n");
194  		return ret;
195  	}
196  
197  	dev_dbg(dev, "Device in READY State\n");
198  	write_lock_irq(&mhi_cntrl->pm_lock);
199  	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
200  	mhi_cntrl->dev_state = MHI_STATE_READY;
201  	write_unlock_irq(&mhi_cntrl->pm_lock);
202  
203  	if (cur_state != MHI_PM_POR) {
204  		dev_err(dev, "Error moving to state %s from %s\n",
205  			to_mhi_pm_state_str(MHI_PM_POR),
206  			to_mhi_pm_state_str(cur_state));
207  		return -EIO;
208  	}
209  
210  	read_lock_bh(&mhi_cntrl->pm_lock);
211  	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
212  		dev_err(dev, "Device registers not accessible\n");
213  		goto error_mmio;
214  	}
215  
216  	/* Configure MMIO registers */
217  	ret = mhi_init_mmio(mhi_cntrl);
218  	if (ret) {
219  		dev_err(dev, "Error configuring MMIO registers\n");
220  		goto error_mmio;
221  	}
222  
223  	/* Add elements to all SW event rings */
224  	mhi_event = mhi_cntrl->mhi_event;
225  	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
226  		struct mhi_ring *ring = &mhi_event->ring;
227  
228  		/* Skip if this is an offload or HW event */
229  		if (mhi_event->offload_ev || mhi_event->hw_ring)
230  			continue;
231  
232  		ring->wp = ring->base + ring->len - ring->el_size;
233  		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
234  		/* Update all cores */
235  		smp_wmb();
236  
237  		/* Ring the event ring db */
238  		spin_lock_irq(&mhi_event->lock);
239  		mhi_ring_er_db(mhi_event);
240  		spin_unlock_irq(&mhi_event->lock);
241  	}
242  
243  	/* Set MHI to M0 state */
244  	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
245  	read_unlock_bh(&mhi_cntrl->pm_lock);
246  
247  	return 0;
248  
249  error_mmio:
250  	read_unlock_bh(&mhi_cntrl->pm_lock);
251  
252  	return -EIO;
253  }
254  
255  int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
256  {
257  	enum mhi_pm_state cur_state;
258  	struct mhi_chan *mhi_chan;
259  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
260  	int i;
261  
262  	write_lock_irq(&mhi_cntrl->pm_lock);
263  	mhi_cntrl->dev_state = MHI_STATE_M0;
264  	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
265  	write_unlock_irq(&mhi_cntrl->pm_lock);
266  	if (unlikely(cur_state != MHI_PM_M0)) {
267  		dev_err(dev, "Unable to transition to M0 state\n");
268  		return -EIO;
269  	}
270  	mhi_cntrl->M0++;
271  
272  	/* Wake up the device */
273  	read_lock_bh(&mhi_cntrl->pm_lock);
274  	mhi_cntrl->wake_get(mhi_cntrl, true);
275  
276  	/* Ring all event rings and CMD ring only if we're in mission mode */
277  	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
278  		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
279  		struct mhi_cmd *mhi_cmd =
280  			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
281  
282  		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
283  			if (mhi_event->offload_ev)
284  				continue;
285  
286  			spin_lock_irq(&mhi_event->lock);
287  			mhi_ring_er_db(mhi_event);
288  			spin_unlock_irq(&mhi_event->lock);
289  		}
290  
291  		/* Only ring primary cmd ring if ring is not empty */
292  		spin_lock_irq(&mhi_cmd->lock);
293  		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
294  			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
295  		spin_unlock_irq(&mhi_cmd->lock);
296  	}
297  
298  	/* Ring channel DB registers */
299  	mhi_chan = mhi_cntrl->mhi_chan;
300  	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
301  		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
302  
303  		if (mhi_chan->db_cfg.reset_req) {
304  			write_lock_irq(&mhi_chan->lock);
305  			mhi_chan->db_cfg.db_mode = true;
306  			write_unlock_irq(&mhi_chan->lock);
307  		}
308  
309  		read_lock_irq(&mhi_chan->lock);
310  
311  		/* Only ring DB if ring is not empty */
312  		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
313  		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
314  			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
315  		read_unlock_irq(&mhi_chan->lock);
316  	}
317  
318  	mhi_cntrl->wake_put(mhi_cntrl, false);
319  	read_unlock_bh(&mhi_cntrl->pm_lock);
320  	wake_up_all(&mhi_cntrl->state_event);
321  
322  	return 0;
323  }
324  
325  /*
326   * After receiving the MHI state change event from the device indicating the
327   * transition to M1 state, the host can transition the device to M2 state
328   * for keeping it in low power state.
329   */
330  void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
331  {
332  	enum mhi_pm_state state;
333  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
334  
335  	write_lock_irq(&mhi_cntrl->pm_lock);
336  	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
337  	if (state == MHI_PM_M2) {
338  		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
339  		mhi_cntrl->dev_state = MHI_STATE_M2;
340  
341  		write_unlock_irq(&mhi_cntrl->pm_lock);
342  
343  		mhi_cntrl->M2++;
344  		wake_up_all(&mhi_cntrl->state_event);
345  
346  		/* If there are any pending resources, exit M2 immediately */
347  		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
348  			     atomic_read(&mhi_cntrl->dev_wake))) {
349  			dev_dbg(dev,
350  				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
351  				atomic_read(&mhi_cntrl->pending_pkts),
352  				atomic_read(&mhi_cntrl->dev_wake));
353  			read_lock_bh(&mhi_cntrl->pm_lock);
354  			mhi_cntrl->wake_get(mhi_cntrl, true);
355  			mhi_cntrl->wake_put(mhi_cntrl, true);
356  			read_unlock_bh(&mhi_cntrl->pm_lock);
357  		} else {
358  			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
359  		}
360  	} else {
361  		write_unlock_irq(&mhi_cntrl->pm_lock);
362  	}
363  }
364  
365  /* MHI M3 completion handler */
366  int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
367  {
368  	enum mhi_pm_state state;
369  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
370  
371  	write_lock_irq(&mhi_cntrl->pm_lock);
372  	mhi_cntrl->dev_state = MHI_STATE_M3;
373  	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
374  	write_unlock_irq(&mhi_cntrl->pm_lock);
375  	if (state != MHI_PM_M3) {
376  		dev_err(dev, "Unable to transition to M3 state\n");
377  		return -EIO;
378  	}
379  
380  	mhi_cntrl->M3++;
381  	wake_up_all(&mhi_cntrl->state_event);
382  
383  	return 0;
384  }
385  
386  /* Handle device Mission Mode transition */
387  static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
388  {
389  	struct mhi_event *mhi_event;
390  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
391  	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
392  	int i, ret;
393  
394  	dev_dbg(dev, "Processing Mission Mode transition\n");
395  
396  	write_lock_irq(&mhi_cntrl->pm_lock);
397  	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
398  		ee = mhi_get_exec_env(mhi_cntrl);
399  
400  	if (!MHI_IN_MISSION_MODE(ee)) {
401  		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
402  		write_unlock_irq(&mhi_cntrl->pm_lock);
403  		wake_up_all(&mhi_cntrl->state_event);
404  		return -EIO;
405  	}
406  	mhi_cntrl->ee = ee;
407  	write_unlock_irq(&mhi_cntrl->pm_lock);
408  
409  	wake_up_all(&mhi_cntrl->state_event);
410  
411  	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
412  			      mhi_destroy_device);
413  	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
414  
415  	/* Force MHI to be in M0 state before continuing */
416  	ret = __mhi_device_get_sync(mhi_cntrl);
417  	if (ret)
418  		return ret;
419  
420  	read_lock_bh(&mhi_cntrl->pm_lock);
421  
422  	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
423  		ret = -EIO;
424  		goto error_mission_mode;
425  	}
426  
427  	/* Add elements to all HW event rings */
428  	mhi_event = mhi_cntrl->mhi_event;
429  	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
430  		struct mhi_ring *ring = &mhi_event->ring;
431  
432  		if (mhi_event->offload_ev || !mhi_event->hw_ring)
433  			continue;
434  
435  		ring->wp = ring->base + ring->len - ring->el_size;
436  		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
437  		/* Update to all cores */
438  		smp_wmb();
439  
440  		spin_lock_irq(&mhi_event->lock);
441  		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
442  			mhi_ring_er_db(mhi_event);
443  		spin_unlock_irq(&mhi_event->lock);
444  	}
445  
446  	read_unlock_bh(&mhi_cntrl->pm_lock);
447  
448  	/*
449  	 * The MHI devices are only created when the client device switches its
450  	 * Execution Environment (EE) to either SBL or AMSS states
451  	 */
452  	mhi_create_devices(mhi_cntrl);
453  
454  	read_lock_bh(&mhi_cntrl->pm_lock);
455  
456  error_mission_mode:
457  	mhi_cntrl->wake_put(mhi_cntrl, false);
458  	read_unlock_bh(&mhi_cntrl->pm_lock);
459  
460  	return ret;
461  }
462  
463  /* Handle shutdown transitions */
464  static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
465  {
466  	enum mhi_pm_state cur_state;
467  	struct mhi_event *mhi_event;
468  	struct mhi_cmd_ctxt *cmd_ctxt;
469  	struct mhi_cmd *mhi_cmd;
470  	struct mhi_event_ctxt *er_ctxt;
471  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
472  	int ret, i;
473  
474  	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
475  		to_mhi_pm_state_str(mhi_cntrl->pm_state));
476  
477  	mutex_lock(&mhi_cntrl->pm_mutex);
478  
479  	/* Trigger MHI RESET so that the device will not access host memory */
480  	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
481  		/* Skip MHI RESET if in RDDM state */
482  		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
483  			goto skip_mhi_reset;
484  
485  		dev_dbg(dev, "Triggering MHI Reset in device\n");
486  		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
487  
488  		/* Wait for the reset bit to be cleared by the device */
489  		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
490  				 MHICTRL_RESET_MASK, 0, 25000);
491  		if (ret)
492  			dev_err(dev, "Device failed to clear MHI Reset\n");
493  
494  		/*
495  		 * Device will clear BHI_INTVEC as a part of RESET processing,
496  		 * hence re-program it
497  		 */
498  		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
499  
500  		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
501  			/* wait for ready to be set */
502  			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
503  						 MHISTATUS,
504  						 MHISTATUS_READY_MASK, 1, 25000);
505  			if (ret)
506  				dev_err(dev, "Device failed to enter READY state\n");
507  		}
508  	}
509  
510  skip_mhi_reset:
511  	dev_dbg(dev,
512  		 "Waiting for all pending event ring processing to complete\n");
513  	mhi_event = mhi_cntrl->mhi_event;
514  	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
515  		if (mhi_event->offload_ev)
516  			continue;
517  		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
518  		tasklet_kill(&mhi_event->task);
519  	}
520  
521  	/* Release lock and wait for all pending threads to complete */
522  	mutex_unlock(&mhi_cntrl->pm_mutex);
523  	dev_dbg(dev, "Waiting for all pending threads to complete\n");
524  	wake_up_all(&mhi_cntrl->state_event);
525  
526  	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
527  	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
528  
529  	mutex_lock(&mhi_cntrl->pm_mutex);
530  
531  	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
532  	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
533  
534  	/* Reset the ev rings and cmd rings */
535  	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
536  	mhi_cmd = mhi_cntrl->mhi_cmd;
537  	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
538  	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
539  		struct mhi_ring *ring = &mhi_cmd->ring;
540  
541  		ring->rp = ring->base;
542  		ring->wp = ring->base;
543  		cmd_ctxt->rp = cmd_ctxt->rbase;
544  		cmd_ctxt->wp = cmd_ctxt->rbase;
545  	}
546  
547  	mhi_event = mhi_cntrl->mhi_event;
548  	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
549  	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
550  		     mhi_event++) {
551  		struct mhi_ring *ring = &mhi_event->ring;
552  
553  		/* Skip offload events */
554  		if (mhi_event->offload_ev)
555  			continue;
556  
557  		ring->rp = ring->base;
558  		ring->wp = ring->base;
559  		er_ctxt->rp = er_ctxt->rbase;
560  		er_ctxt->wp = er_ctxt->rbase;
561  	}
562  
563  	/* Move to disable state */
564  	write_lock_irq(&mhi_cntrl->pm_lock);
565  	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
566  	write_unlock_irq(&mhi_cntrl->pm_lock);
567  	if (unlikely(cur_state != MHI_PM_DISABLE))
568  		dev_err(dev, "Error moving from PM state: %s to: %s\n",
569  			to_mhi_pm_state_str(cur_state),
570  			to_mhi_pm_state_str(MHI_PM_DISABLE));
571  
572  	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
573  		to_mhi_pm_state_str(mhi_cntrl->pm_state),
574  		mhi_state_str(mhi_cntrl->dev_state));
575  
576  	mutex_unlock(&mhi_cntrl->pm_mutex);
577  }
578  
579  /* Handle system error transitions */
580  static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
581  {
582  	enum mhi_pm_state cur_state, prev_state;
583  	enum dev_st_transition next_state;
584  	struct mhi_event *mhi_event;
585  	struct mhi_cmd_ctxt *cmd_ctxt;
586  	struct mhi_cmd *mhi_cmd;
587  	struct mhi_event_ctxt *er_ctxt;
588  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
589  	int ret, i;
590  
591  	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
592  		to_mhi_pm_state_str(mhi_cntrl->pm_state),
593  		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
594  
595  	/* We must notify the MHI control driver so it can clean up first */
596  	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
597  
598  	mutex_lock(&mhi_cntrl->pm_mutex);
599  	write_lock_irq(&mhi_cntrl->pm_lock);
600  	prev_state = mhi_cntrl->pm_state;
601  	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
602  	write_unlock_irq(&mhi_cntrl->pm_lock);
603  
604  	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
605  		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
606  			to_mhi_pm_state_str(cur_state),
607  			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
608  		goto exit_sys_error_transition;
609  	}
610  
611  	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
612  	mhi_cntrl->dev_state = MHI_STATE_RESET;
613  
614  	/* Wake up threads waiting for state transition */
615  	wake_up_all(&mhi_cntrl->state_event);
616  
617  	/* Trigger MHI RESET so that the device will not access host memory */
618  	if (MHI_REG_ACCESS_VALID(prev_state)) {
619  		u32 in_reset = -1;
620  		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
621  
622  		dev_dbg(dev, "Triggering MHI Reset in device\n");
623  		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
624  
625  		/* Wait for the reset bit to be cleared by the device */
626  		ret = wait_event_timeout(mhi_cntrl->state_event,
627  					 mhi_read_reg_field(mhi_cntrl,
628  							    mhi_cntrl->regs,
629  							    MHICTRL,
630  							    MHICTRL_RESET_MASK,
631  							    &in_reset) ||
632  					!in_reset, timeout);
633  		if (!ret || in_reset) {
634  			dev_err(dev, "Device failed to exit MHI Reset state\n");
635  			write_lock_irq(&mhi_cntrl->pm_lock);
636  			cur_state = mhi_tryset_pm_state(mhi_cntrl,
637  							MHI_PM_SYS_ERR_FAIL);
638  			write_unlock_irq(&mhi_cntrl->pm_lock);
639  			/* Shutdown may have occurred, otherwise cleanup now */
640  			if (cur_state != MHI_PM_SYS_ERR_FAIL)
641  				goto exit_sys_error_transition;
642  		}
643  
644  		/*
645  		 * Device will clear BHI_INTVEC as a part of RESET processing,
646  		 * hence re-program it
647  		 */
648  		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
649  	}
650  
651  	dev_dbg(dev,
652  		"Waiting for all pending event ring processing to complete\n");
653  	mhi_event = mhi_cntrl->mhi_event;
654  	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
655  		if (mhi_event->offload_ev)
656  			continue;
657  		tasklet_kill(&mhi_event->task);
658  	}
659  
660  	/* Release lock and wait for all pending threads to complete */
661  	mutex_unlock(&mhi_cntrl->pm_mutex);
662  	dev_dbg(dev, "Waiting for all pending threads to complete\n");
663  	wake_up_all(&mhi_cntrl->state_event);
664  
665  	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
666  	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
667  
668  	mutex_lock(&mhi_cntrl->pm_mutex);
669  
670  	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
671  	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
672  
673  	/* Reset the ev rings and cmd rings */
674  	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
675  	mhi_cmd = mhi_cntrl->mhi_cmd;
676  	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
677  	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
678  		struct mhi_ring *ring = &mhi_cmd->ring;
679  
680  		ring->rp = ring->base;
681  		ring->wp = ring->base;
682  		cmd_ctxt->rp = cmd_ctxt->rbase;
683  		cmd_ctxt->wp = cmd_ctxt->rbase;
684  	}
685  
686  	mhi_event = mhi_cntrl->mhi_event;
687  	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
688  	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
689  	     mhi_event++) {
690  		struct mhi_ring *ring = &mhi_event->ring;
691  
692  		/* Skip offload events */
693  		if (mhi_event->offload_ev)
694  			continue;
695  
696  		ring->rp = ring->base;
697  		ring->wp = ring->base;
698  		er_ctxt->rp = er_ctxt->rbase;
699  		er_ctxt->wp = er_ctxt->rbase;
700  	}
701  
702  	/* Transition to next state */
703  	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
704  		write_lock_irq(&mhi_cntrl->pm_lock);
705  		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
706  		write_unlock_irq(&mhi_cntrl->pm_lock);
707  		if (cur_state != MHI_PM_POR) {
708  			dev_err(dev, "Error moving to state %s from %s\n",
709  				to_mhi_pm_state_str(MHI_PM_POR),
710  				to_mhi_pm_state_str(cur_state));
711  			goto exit_sys_error_transition;
712  		}
713  		next_state = DEV_ST_TRANSITION_PBL;
714  	} else {
715  		next_state = DEV_ST_TRANSITION_READY;
716  	}
717  
718  	mhi_queue_state_transition(mhi_cntrl, next_state);
719  
720  exit_sys_error_transition:
721  	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
722  		to_mhi_pm_state_str(mhi_cntrl->pm_state),
723  		mhi_state_str(mhi_cntrl->dev_state));
724  
725  	mutex_unlock(&mhi_cntrl->pm_mutex);
726  }
727  
728  /* Queue a new work item and schedule work */
729  int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
730  			       enum dev_st_transition state)
731  {
732  	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
733  	unsigned long flags;
734  
735  	if (!item)
736  		return -ENOMEM;
737  
738  	item->state = state;
739  	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
740  	list_add_tail(&item->node, &mhi_cntrl->transition_list);
741  	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
742  
743  	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
744  
745  	return 0;
746  }
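/*
 * Note added for clarity: queued transitions are serialized; mhi_pm_st_worker()
 * below drains transition_list in FIFO order from the controller's hiprio_wq
 * workqueue.
 */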
747  
748  /* SYS_ERR worker */
749  void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
750  {
751  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
752  
753  	/* skip if controller supports RDDM */
754  	if (mhi_cntrl->rddm_image) {
755  		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
756  		return;
757  	}
758  
759  	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
760  }
761  
762  /* Device State Transition worker */
763  void mhi_pm_st_worker(struct work_struct *work)
764  {
765  	struct state_transition *itr, *tmp;
766  	LIST_HEAD(head);
767  	struct mhi_controller *mhi_cntrl = container_of(work,
768  							struct mhi_controller,
769  							st_worker);
770  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
771  
772  	spin_lock_irq(&mhi_cntrl->transition_lock);
773  	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
774  	spin_unlock_irq(&mhi_cntrl->transition_lock);
775  
776  	list_for_each_entry_safe(itr, tmp, &head, node) {
777  		list_del(&itr->node);
778  		dev_dbg(dev, "Handling state transition: %s\n",
779  			TO_DEV_STATE_TRANS_STR(itr->state));
780  
781  		switch (itr->state) {
782  		case DEV_ST_TRANSITION_PBL:
783  			write_lock_irq(&mhi_cntrl->pm_lock);
784  			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
785  				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
786  			write_unlock_irq(&mhi_cntrl->pm_lock);
787  			mhi_fw_load_handler(mhi_cntrl);
788  			break;
789  		case DEV_ST_TRANSITION_SBL:
790  			write_lock_irq(&mhi_cntrl->pm_lock);
791  			mhi_cntrl->ee = MHI_EE_SBL;
792  			write_unlock_irq(&mhi_cntrl->pm_lock);
793  			/*
794  			 * The MHI devices are only created when the client
795  			 * device switches its Execution Environment (EE) to
796  			 * either SBL or AMSS states
797  			 */
798  			mhi_create_devices(mhi_cntrl);
799  			if (mhi_cntrl->fbc_download)
800  				mhi_download_amss_image(mhi_cntrl);
801  			break;
802  		case DEV_ST_TRANSITION_MISSION_MODE:
803  			mhi_pm_mission_mode_transition(mhi_cntrl);
804  			break;
805  		case DEV_ST_TRANSITION_FP:
806  			write_lock_irq(&mhi_cntrl->pm_lock);
807  			mhi_cntrl->ee = MHI_EE_FP;
808  			write_unlock_irq(&mhi_cntrl->pm_lock);
809  			mhi_create_devices(mhi_cntrl);
810  			break;
811  		case DEV_ST_TRANSITION_READY:
812  			mhi_ready_state_transition(mhi_cntrl);
813  			break;
814  		case DEV_ST_TRANSITION_SYS_ERR:
815  			mhi_pm_sys_error_transition(mhi_cntrl);
816  			break;
817  		case DEV_ST_TRANSITION_DISABLE:
818  			mhi_pm_disable_transition(mhi_cntrl);
819  			break;
820  		default:
821  			break;
822  		}
823  		kfree(itr);
824  	}
825  }
826  
827  int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
828  {
829  	struct mhi_chan *itr, *tmp;
830  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
831  	enum mhi_pm_state new_state;
832  	int ret;
833  
834  	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
835  		return -EINVAL;
836  
837  	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
838  		return -EIO;
839  
840  	/* Return busy if there are any pending resources */
841  	if (atomic_read(&mhi_cntrl->dev_wake) ||
842  	    atomic_read(&mhi_cntrl->pending_pkts))
843  		return -EBUSY;
844  
845  	/* Take MHI out of M2 state */
846  	read_lock_bh(&mhi_cntrl->pm_lock);
847  	mhi_cntrl->wake_get(mhi_cntrl, false);
848  	read_unlock_bh(&mhi_cntrl->pm_lock);
849  
850  	ret = wait_event_timeout(mhi_cntrl->state_event,
851  				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
852  				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
853  				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
854  				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
855  
856  	read_lock_bh(&mhi_cntrl->pm_lock);
857  	mhi_cntrl->wake_put(mhi_cntrl, false);
858  	read_unlock_bh(&mhi_cntrl->pm_lock);
859  
860  	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
861  		dev_err(dev,
862  			"Could not enter M0/M1 state\n");
863  		return -EIO;
864  	}
865  
866  	write_lock_irq(&mhi_cntrl->pm_lock);
867  
868  	if (atomic_read(&mhi_cntrl->dev_wake) ||
869  	    atomic_read(&mhi_cntrl->pending_pkts)) {
870  		write_unlock_irq(&mhi_cntrl->pm_lock);
871  		return -EBUSY;
872  	}
873  
874  	dev_dbg(dev, "Allowing M3 transition\n");
875  	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
876  	if (new_state != MHI_PM_M3_ENTER) {
877  		write_unlock_irq(&mhi_cntrl->pm_lock);
878  		dev_err(dev,
879  			"Error setting to PM state: %s from: %s\n",
880  			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
881  			to_mhi_pm_state_str(mhi_cntrl->pm_state));
882  		return -EIO;
883  	}
884  
885  	/* Set MHI to M3 and wait for completion */
886  	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
887  	write_unlock_irq(&mhi_cntrl->pm_lock);
888  	dev_dbg(dev, "Waiting for M3 completion\n");
889  
890  	ret = wait_event_timeout(mhi_cntrl->state_event,
891  				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
892  				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
893  				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
894  
895  	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
896  		dev_err(dev,
897  			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
898  			mhi_state_str(mhi_cntrl->dev_state),
899  			to_mhi_pm_state_str(mhi_cntrl->pm_state));
900  		return -EIO;
901  	}
902  
903  	/* Notify clients about entering LPM */
904  	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
905  		mutex_lock(&itr->mutex);
906  		if (itr->mhi_dev)
907  			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
908  		mutex_unlock(&itr->mutex);
909  	}
910  
911  	return 0;
912  }
913  EXPORT_SYMBOL_GPL(mhi_pm_suspend);
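/*
 * Illustrative usage (sketch, not part of this driver): a controller driver,
 * e.g. a PCI glue layer, would typically call mhi_pm_suspend()/mhi_pm_resume()
 * from its system PM callbacks. The callback names below are hypothetical and
 * assume the mhi_controller pointer is stored as driver data:
 *
 *	static int example_mhi_suspend(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_suspend(mhi_cntrl);
 *	}
 *
 *	static int example_mhi_resume(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_resume(mhi_cntrl);
 *	}
 */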
914  
915  static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
916  {
917  	struct mhi_chan *itr, *tmp;
918  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
919  	enum mhi_pm_state cur_state;
920  	int ret;
921  
922  	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
923  		to_mhi_pm_state_str(mhi_cntrl->pm_state),
924  		mhi_state_str(mhi_cntrl->dev_state));
925  
926  	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
927  		return 0;
928  
929  	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
930  		return -EIO;
931  
932  	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
933  		dev_warn(dev, "Resuming from non M3 state (%s)\n",
934  			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
935  		if (!force)
936  			return -EINVAL;
937  	}
938  
939  	/* Notify clients about exiting LPM */
940  	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
941  		mutex_lock(&itr->mutex);
942  		if (itr->mhi_dev)
943  			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
944  		mutex_unlock(&itr->mutex);
945  	}
946  
947  	write_lock_irq(&mhi_cntrl->pm_lock);
948  	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
949  	if (cur_state != MHI_PM_M3_EXIT) {
950  		write_unlock_irq(&mhi_cntrl->pm_lock);
951  		dev_info(dev,
952  			 "Error setting to PM state: %s from: %s\n",
953  			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
954  			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
955  		return -EIO;
956  	}
957  
958  	/* Set MHI to M0 and wait for completion */
959  	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
960  	write_unlock_irq(&mhi_cntrl->pm_lock);
961  
962  	ret = wait_event_timeout(mhi_cntrl->state_event,
963  				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
964  				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
965  				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
966  				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
967  
968  	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
969  		dev_err(dev,
970  			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
971  			mhi_state_str(mhi_cntrl->dev_state),
972  			to_mhi_pm_state_str(mhi_cntrl->pm_state));
973  		return -EIO;
974  	}
975  
976  	return 0;
977  }
978  
979  int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
980  {
981  	return __mhi_pm_resume(mhi_cntrl, false);
982  }
983  EXPORT_SYMBOL_GPL(mhi_pm_resume);
984  
985  int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
986  {
987  	return __mhi_pm_resume(mhi_cntrl, true);
988  }
989  EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
990  
991  int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
992  {
993  	int ret;
994  
995  	/* Wake up the device */
996  	read_lock_bh(&mhi_cntrl->pm_lock);
997  	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
998  		read_unlock_bh(&mhi_cntrl->pm_lock);
999  		return -EIO;
1000  	}
1001  	mhi_cntrl->wake_get(mhi_cntrl, true);
1002  	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1003  		mhi_trigger_resume(mhi_cntrl);
1004  	read_unlock_bh(&mhi_cntrl->pm_lock);
1005  
1006  	ret = wait_event_timeout(mhi_cntrl->state_event,
1007  				 mhi_cntrl->pm_state == MHI_PM_M0 ||
1008  				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
1009  				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1010  
1011  	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1012  		read_lock_bh(&mhi_cntrl->pm_lock);
1013  		mhi_cntrl->wake_put(mhi_cntrl, false);
1014  		read_unlock_bh(&mhi_cntrl->pm_lock);
1015  		return -EIO;
1016  	}
1017  
1018  	return 0;
1019  }
1020  
1021  /* Assert device wake db */
1022  static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
1023  {
1024  	unsigned long flags;
1025  
1026  	/*
1027  	 * If force flag is set, then increment the wake count value and
1028  	 * ring wake db
1029  	 */
1030  	if (unlikely(force)) {
1031  		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
1032  		atomic_inc(&mhi_cntrl->dev_wake);
1033  		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
1034  		    !mhi_cntrl->wake_set) {
1035  			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
1036  			mhi_cntrl->wake_set = true;
1037  		}
1038  		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
1039  	} else {
1040  		/*
1041  		 * If resources are already requested, then just increment
1042  		 * the wake count value and return
1043  		 */
1044  		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
1045  			return;
1046  
1047  		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
1048  		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
1049  		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
1050  		    !mhi_cntrl->wake_set) {
1051  			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
1052  			mhi_cntrl->wake_set = true;
1053  		}
1054  		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
1055  	}
1056  }
1057  
1058  /* De-assert device wake db */
1059  static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
1060  				  bool override)
1061  {
1062  	unsigned long flags;
1063  
1064  	/*
1065  	 * Only continue if there is a single resource, else just decrement
1066  	 * and return
1067  	 */
1068  	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
1069  		return;
1070  
1071  	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
1072  	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
1073  	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
1074  	    mhi_cntrl->wake_set) {
1075  		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
1076  		mhi_cntrl->wake_set = false;
1077  	}
1078  	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
1079  }
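/*
 * Note added for clarity: dev_wake is a vote count. Broadly, the wake doorbell
 * is only written on the 0 -> 1 edge in mhi_assert_dev_wake() and on the
 * 1 -> 0 edge in mhi_deassert_dev_wake() (subject to the PM state checks and
 * the force/override flags); intermediate votes only adjust the counter.
 */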
1080  
1081  int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
1082  {
1083  	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
1084  	enum mhi_state state;
1085  	enum mhi_ee_type current_ee;
1086  	enum dev_st_transition next_state;
1087  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1088  	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
1089  	int ret, i;
1090  
1091  	dev_info(dev, "Requested to power ON\n");
1092  
1093  	/* Supply default wake routines if not provided by controller driver */
1094  	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
1095  	    !mhi_cntrl->wake_toggle) {
1096  		mhi_cntrl->wake_get = mhi_assert_dev_wake;
1097  		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
1098  		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
1099  			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
1100  	}
1101  
1102  	mutex_lock(&mhi_cntrl->pm_mutex);
1103  	mhi_cntrl->pm_state = MHI_PM_DISABLE;
1104  
1105  	/* Setup BHI INTVEC */
1106  	write_lock_irq(&mhi_cntrl->pm_lock);
1107  	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
1108  	mhi_cntrl->pm_state = MHI_PM_POR;
1109  	mhi_cntrl->ee = MHI_EE_MAX;
1110  	current_ee = mhi_get_exec_env(mhi_cntrl);
1111  	write_unlock_irq(&mhi_cntrl->pm_lock);
1112  
1113  	/* Confirm that the device is in valid exec env */
1114  	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
1115  		dev_err(dev, "%s is not a valid EE for power on\n",
1116  			TO_MHI_EXEC_STR(current_ee));
1117  		ret = -EIO;
1118  		goto error_exit;
1119  	}
1120  
1121  	state = mhi_get_mhi_state(mhi_cntrl);
1122  	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
1123  		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));
1124  
1125  	if (state == MHI_STATE_SYS_ERR) {
1126  		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
1127  		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
1128  				 MHICTRL_RESET_MASK, 0, interval_us);
1129  		if (ret) {
1130  			dev_info(dev, "Failed to reset MHI due to syserr state\n");
1131  			goto error_exit;
1132  		}
1133  
1134  		/*
1135  		 * Device clears BHI_INTVEC as a part of RESET processing,
1136  		 * hence re-program it
1137  		 */
1138  		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
1139  	}
1140  
1141  	/* IRQs have been requested during probe, so we just need to enable them. */
1142  	enable_irq(mhi_cntrl->irq[0]);
1143  
1144  	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
1145  		if (mhi_event->offload_ev)
1146  			continue;
1147  
1148  		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
1149  	}
1150  
1151  	/* Transition to next state */
1152  	next_state = MHI_IN_PBL(current_ee) ?
1153  		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
1154  
1155  	mhi_queue_state_transition(mhi_cntrl, next_state);
1156  
1157  	mutex_unlock(&mhi_cntrl->pm_mutex);
1158  
1159  	dev_info(dev, "Power on setup success\n");
1160  
1161  	return 0;
1162  
1163  error_exit:
1164  	mhi_cntrl->pm_state = MHI_PM_DISABLE;
1165  	mutex_unlock(&mhi_cntrl->pm_mutex);
1166  
1167  	return ret;
1168  }
1169  EXPORT_SYMBOL_GPL(mhi_async_power_up);
1170  
1171  void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
1172  {
1173  	enum mhi_pm_state cur_state, transition_state;
1174  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1175  
1176  	mutex_lock(&mhi_cntrl->pm_mutex);
1177  	write_lock_irq(&mhi_cntrl->pm_lock);
1178  	cur_state = mhi_cntrl->pm_state;
1179  	if (cur_state == MHI_PM_DISABLE) {
1180  		write_unlock_irq(&mhi_cntrl->pm_lock);
1181  		mutex_unlock(&mhi_cntrl->pm_mutex);
1182  		return; /* Already powered down */
1183  	}
1184  
1185  	/* If it's not a graceful shutdown, force MHI to linkdown state */
1186  	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
1187  			   MHI_PM_LD_ERR_FATAL_DETECT;
1188  
1189  	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
1190  	if (cur_state != transition_state) {
1191  		dev_err(dev, "Failed to move to state: %s from: %s\n",
1192  			to_mhi_pm_state_str(transition_state),
1193  			to_mhi_pm_state_str(mhi_cntrl->pm_state));
1194  		/* Force link down or error fatal detected state */
1195  		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
1196  	}
1197  
1198  	/* mark device inactive to avoid any further host processing */
1199  	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
1200  	mhi_cntrl->dev_state = MHI_STATE_RESET;
1201  
1202  	wake_up_all(&mhi_cntrl->state_event);
1203  
1204  	write_unlock_irq(&mhi_cntrl->pm_lock);
1205  	mutex_unlock(&mhi_cntrl->pm_mutex);
1206  
1207  	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
1208  
1209  	/* Wait for shutdown to complete */
1210  	flush_work(&mhi_cntrl->st_worker);
1211  
1212  	disable_irq(mhi_cntrl->irq[0]);
1213  }
1214  EXPORT_SYMBOL_GPL(mhi_power_down);
1215  
1216  int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
1217  {
1218  	int ret = mhi_async_power_up(mhi_cntrl);
1219  
1220  	if (ret)
1221  		return ret;
1222  
1223  	wait_event_timeout(mhi_cntrl->state_event,
1224  			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
1225  			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
1226  			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
1227  
1228  	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
1229  	if (ret)
1230  		mhi_power_down(mhi_cntrl, false);
1231  
1232  	return ret;
1233  }
1234  EXPORT_SYMBOL(mhi_sync_power_up);
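/*
 * Illustrative power-up/power-down sequence (sketch, not part of this driver):
 * after mhi_register_controller(), a controller driver would typically do
 * something like the following; error handling is abbreviated:
 *
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	if (ret)
 *		return ret;
 *
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *	if (ret) {
 *		mhi_unprepare_after_power_down(mhi_cntrl);
 *		return ret;
 *	}
 *
 *	... device in mission mode, client devices created ...
 *
 *	mhi_power_down(mhi_cntrl, true);
 *	mhi_unprepare_after_power_down(mhi_cntrl);
 */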
1235  
1236  int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
1237  {
1238  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1239  	int ret;
1240  
1241  	/* Check if device is already in RDDM */
1242  	if (mhi_cntrl->ee == MHI_EE_RDDM)
1243  		return 0;
1244  
1245  	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
1246  	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
1247  
1248  	/* Wait for RDDM event */
1249  	ret = wait_event_timeout(mhi_cntrl->state_event,
1250  				 mhi_cntrl->ee == MHI_EE_RDDM,
1251  				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1252  	ret = ret ? 0 : -EIO;
1253  
1254  	return ret;
1255  }
1256  EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
1257  
1258  void mhi_device_get(struct mhi_device *mhi_dev)
1259  {
1260  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1261  
1262  	mhi_dev->dev_wake++;
1263  	read_lock_bh(&mhi_cntrl->pm_lock);
1264  	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1265  		mhi_trigger_resume(mhi_cntrl);
1266  
1267  	mhi_cntrl->wake_get(mhi_cntrl, true);
1268  	read_unlock_bh(&mhi_cntrl->pm_lock);
1269  }
1270  EXPORT_SYMBOL_GPL(mhi_device_get);
1271  
1272  int mhi_device_get_sync(struct mhi_device *mhi_dev)
1273  {
1274  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1275  	int ret;
1276  
1277  	ret = __mhi_device_get_sync(mhi_cntrl);
1278  	if (!ret)
1279  		mhi_dev->dev_wake++;
1280  
1281  	return ret;
1282  }
1283  EXPORT_SYMBOL_GPL(mhi_device_get_sync);
1284  
1285  void mhi_device_put(struct mhi_device *mhi_dev)
1286  {
1287  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1288  
1289  	mhi_dev->dev_wake--;
1290  	read_lock_bh(&mhi_cntrl->pm_lock);
1291  	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1292  		mhi_trigger_resume(mhi_cntrl);
1293  
1294  	mhi_cntrl->wake_put(mhi_cntrl, false);
1295  	read_unlock_bh(&mhi_cntrl->pm_lock);
1296  }
1297  EXPORT_SYMBOL_GPL(mhi_device_put);
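/*
 * Illustrative client usage (sketch, not part of this driver): an MHI client
 * driver brackets device access with a wake vote so the device is held in M0
 * while I/O is outstanding:
 *
 *	ret = mhi_device_get_sync(mhi_dev);
 *	if (ret)
 *		return ret;
 *
 *	... queue transfers, access the device ...
 *
 *	mhi_device_put(mhi_dev);
 */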
1298