main.c at 1ddc7618294084fff8d673217a9479550990ee84 vs. main.c at 2a81ada32f0e584fc0c943e0d3a8c9f4fae411d6

The two revisions are rendered below as a unified diff: "-" lines appear only in the 1ddc761 revision, "+" lines only in the 2a81ada revision, and space-prefixed lines are common to both. The "--- N unchanged lines hidden ---" markers are the viewer's elisions and cover identical code in both revisions.

--- main.c (1ddc7618294084fff8d673217a9479550990ee84)
+++ main.c (2a81ada32f0e584fc0c943e0d3a8c9f4fae411d6)
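In the first hunk (the command ring handler, mhi_ep_process_cmd_ring() judging by the MHI_TRE_GET_CMD_* accessors), the 1ddc761 revision differs in three ways: it rejects channel IDs the controller does not support before indexing mhi_chan[], it invokes the -ENOTCONN disconnect callback for the STOP and RESET commands only when a client driver has registered an xfer_cb, and it logs "RESET" rather than "STOP" in the RESET branch.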
 // SPDX-License-Identifier: GPL-2.0
 /*
  * MHI Endpoint bus stack
  *
  * Copyright (C) 2022 Linaro Ltd.
  * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  */

--- 109 unchanged lines hidden ---

         struct device *dev = &mhi_cntrl->mhi_dev->dev;
         struct mhi_result result = {};
         struct mhi_ep_chan *mhi_chan;
         struct mhi_ep_ring *ch_ring;
         u32 tmp, ch_id;
         int ret;

         ch_id = MHI_TRE_GET_CMD_CHID(el);
-
-        /* Check if the channel is supported by the controller */
-        if ((ch_id > mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
-                dev_err(dev, "Channel (%u) not supported!\n", ch_id);
-                return -ENODEV;
-        }
-
         mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
         ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

         switch (MHI_TRE_GET_CMD_TYPE(el)) {
         case MHI_PKT_TYPE_START_CHAN_CMD:
                 dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

                 mutex_lock(&mhi_chan->lock);

--- 57 unchanged lines hidden ---

                         return -ENODEV;
                 }

                 mutex_lock(&mhi_chan->lock);
                 /* Disable DB for the channel */
                 mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

                 /* Send channel disconnect status to client drivers */
-                if (mhi_chan->xfer_cb) {
-                        result.transaction_status = -ENOTCONN;
-                        result.bytes_xferd = 0;
-                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
-                }
+                result.transaction_status = -ENOTCONN;
+                result.bytes_xferd = 0;
+                mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

                 /* Set channel state to STOP */
                 mhi_chan->state = MHI_CH_STATE_STOP;
                 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
                 tmp &= ~CHAN_CTX_CHSTATE_MASK;
                 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
                 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

                 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
                 if (ret) {
                         dev_err(dev, "Error sending command completion event (%u)\n",
                                 MHI_EV_CC_SUCCESS);
                         goto err_unlock;
                 }

                 mutex_unlock(&mhi_chan->lock);
                 break;
         case MHI_PKT_TYPE_RESET_CHAN_CMD:
-                dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
+                dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
                 if (!ch_ring->started) {
                         dev_err(dev, "Channel (%u) not opened\n", ch_id);
                         return -ENODEV;
                 }

                 mutex_lock(&mhi_chan->lock);
                 /* Stop and reset the transfer ring */
                 mhi_ep_ring_reset(mhi_cntrl, ch_ring);

                 /* Send channel disconnect status to client driver */
-                if (mhi_chan->xfer_cb) {
-                        result.transaction_status = -ENOTCONN;
-                        result.bytes_xferd = 0;
-                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
-                }
+                result.transaction_status = -ENOTCONN;
+                result.bytes_xferd = 0;
+                mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

                 /* Set channel state to DISABLED */
                 mhi_chan->state = MHI_CH_STATE_DISABLED;
                 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
                 tmp &= ~CHAN_CTX_CHSTATE_MASK;
                 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
                 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

--- 472 unchanged lines hidden ---
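In the queued channel ring loop below, the 1ddc761 revision acquires chan->lock before updating the write offset and re-checks ring->started under the lock, since the ring could have been stopped while the worker waited for the lock; the 2a81ada revision takes the lock only right before processing the element. The additional mutex_unlock() calls on the early-exit paths follow from the earlier lock acquisition.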

         list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
         spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

         /* Process each queued channel ring. In case of an error, just process next element. */
         list_for_each_entry_safe(itr, tmp, &head, node) {
                 list_del(&itr->node);
                 ring = itr->ring;

-                chan = &mhi_cntrl->mhi_chan[ring->ch_id];
-                mutex_lock(&chan->lock);
-
-                /*
-                 * The ring could've stopped while we waited to grab the (chan->lock), so do
-                 * a sanity check before going further.
-                 */
-                if (!ring->started) {
-                        mutex_unlock(&chan->lock);
-                        kfree(itr);
-                        continue;
-                }
-
                 /* Update the write offset for the ring */
                 ret = mhi_ep_update_wr_offset(ring);
                 if (ret) {
                         dev_err(dev, "Error updating write offset for ring\n");
-                        mutex_unlock(&chan->lock);
                         kfree(itr);
                         continue;
                 }

                 /* Sanity check to make sure there are elements in the ring */
                 if (ring->rd_offset == ring->wr_offset) {
-                        mutex_unlock(&chan->lock);
                         kfree(itr);
                         continue;
                 }

                 el = &ring->ring_cache[ring->rd_offset];
+                chan = &mhi_cntrl->mhi_chan[ring->ch_id];

+                mutex_lock(&chan->lock);
                 dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
                 ret = mhi_ep_process_ch_ring(ring, el);
                 if (ret) {
                         dev_err(dev, "Error processing ring for channel (%u): %d\n",
                                 ring->ch_id, ret);
                         mutex_unlock(&chan->lock);
                         kfree(itr);
                         continue;

--- 220 unchanged lines hidden ---
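In mhi_ep_reset_worker() below, the 1ddc761 revision powers the stack down, performs the MMIO reset under the state_lock mutex, and simply powers back up on SYS_ERR; the 2a81ada revision aborts transfers, samples the state under spin_lock_bh(), and open-codes the re-initialization (MMIO init, AMSS execution environment, READY notification, mhi_ep_enable(), enable_irq()), which is why it also needs the dev and ret locals.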

         mhi_ep_mmio_mask_interrupts(mhi_cntrl);

         mhi_cntrl->enabled = false;
 }

 static void mhi_ep_reset_worker(struct work_struct *work)
 {
         struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+        struct device *dev = &mhi_cntrl->mhi_dev->dev;
         enum mhi_state cur_state;
+        int ret;

-        mhi_ep_power_down(mhi_cntrl);
+        mhi_ep_abort_transfer(mhi_cntrl);

-        mutex_lock(&mhi_cntrl->state_lock);
-
+        spin_lock_bh(&mhi_cntrl->state_lock);
         /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
         mhi_ep_mmio_reset(mhi_cntrl);
         cur_state = mhi_cntrl->mhi_state;
+        spin_unlock_bh(&mhi_cntrl->state_lock);

         /*
          * Only proceed further if the reset is due to SYS_ERR. The host will
          * issue reset during shutdown also and we don't need to do re-init in
          * that case.
          */
-        if (cur_state == MHI_STATE_SYS_ERR)
-                mhi_ep_power_up(mhi_cntrl);
-
-        mutex_unlock(&mhi_cntrl->state_lock);
+        if (cur_state == MHI_STATE_SYS_ERR) {
+                mhi_ep_mmio_init(mhi_cntrl);
+
+                /* Set AMSS EE before signaling ready state */
+                mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+                /* All set, notify the host that we are ready */
+                ret = mhi_ep_set_ready_state(mhi_cntrl);
+                if (ret)
+                        return;
+
+                dev_dbg(dev, "READY state notification sent to the host\n");
+
+                ret = mhi_ep_enable(mhi_cntrl);
+                if (ret) {
+                        dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+                        return;
+                }
+
+                enable_irq(mhi_cntrl->irq);
+        }
 }

 /*
  * We don't need to do anything special other than setting the MHI SYS_ERR
  * state. The host will reset all contexts and issue MHI RESET so that we
  * could also recover from error state.
  */
 void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)

--- 62 unchanged lines hidden ---
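In mhi_ep_power_down() below, the 1ddc761 revision frees the event buffer and disables the IRQ only when the controller was actually enabled; in the 2a81ada revision only the transfer abort is conditional and kfree()/disable_irq() always run.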

         kfree(mhi_cntrl->mhi_event);

         return ret;
 }
 EXPORT_SYMBOL_GPL(mhi_ep_power_up);

 void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
 {
-        if (mhi_cntrl->enabled) {
+        if (mhi_cntrl->enabled)
                 mhi_ep_abort_transfer(mhi_cntrl);
-                kfree(mhi_cntrl->mhi_event);
-                disable_irq(mhi_cntrl->irq);
-        }
+
+        kfree(mhi_cntrl->mhi_event);
+        disable_irq(mhi_cntrl->irq);
 }
 EXPORT_SYMBOL_GPL(mhi_ep_power_down);

 void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
 {
         struct mhi_ep_chan *mhi_chan;
         u32 tmp;
         int i;

--- 9 unchanged lines hidden ---
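In the suspend/resume helpers below, the 1ddc761 revision also updates the cached software state (mhi_chan->state) alongside the chcfg field in the channel context; the le32 read-modify-write itself is identical in both revisions. The following standalone sketch (userspace C with demo values, not driver code; CHSTATE_MASK stands in for CHAN_CTX_CHSTATE_MASK, and the kernel's FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> are expanded by hand) shows that pattern in isolation:

    /*
     * Illustrative sketch only: the chcfg read-modify-write pattern
     * used by the suspend/resume hunks, compilable standalone.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define CHSTATE_MASK     0xffu                 /* demo: low byte holds the channel state */
    #define MASK_LSB(m)      ((m) & ~((m) << 1))   /* lowest set bit of a contiguous mask */
    #define FIELD_PREP(m, v) (((uint32_t)(v) * MASK_LSB(m)) & (m))
    #define FIELD_GET(m, v)  (((v) & (m)) / MASK_LSB(m))

    int main(void)
    {
        uint32_t chcfg = FIELD_PREP(CHSTATE_MASK, 2);  /* demo "running" value */
        uint32_t tmp;

        tmp = chcfg;                          /* le32_to_cpu() elided in the demo */
        tmp &= ~CHSTATE_MASK;                 /* clear the state field */
        tmp |= FIELD_PREP(CHSTATE_MASK, 3);   /* demo "suspended" value */
        chcfg = tmp;                          /* cpu_to_le32() elided */

        printf("channel state field: %u\n", (unsigned int)FIELD_GET(CHSTATE_MASK, chcfg));
        return 0;
    }

The mask's lowest set bit doubles as the shift amount, which is how the kernel macros position a value inside a contiguous bitfield.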

                 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
                 if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
                         mutex_unlock(&mhi_chan->lock);
                         continue;
                 }

                 dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
                 /* Set channel state to SUSPENDED */
-                mhi_chan->state = MHI_CH_STATE_SUSPENDED;
                 tmp &= ~CHAN_CTX_CHSTATE_MASK;
                 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
                 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
                 mutex_unlock(&mhi_chan->lock);
         }
 }

 void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)

--- 13 unchanged lines hidden ---

                 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
                 if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
                         mutex_unlock(&mhi_chan->lock);
                         continue;
                 }

                 dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
                 /* Set channel state to RUNNING */
-                mhi_chan->state = MHI_CH_STATE_RUNNING;
                 tmp &= ~CHAN_CTX_CHSTATE_MASK;
                 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
                 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
                 mutex_unlock(&mhi_chan->lock);
         }
 }

 static void mhi_ep_release_device(struct device *dev)

--- 217 unchanged lines hidden ---
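In the controller registration path below, the state_lock initialization mirrors each revision's locking scheme from the reset worker: mutex_init() in 1ddc761 versus spin_lock_init() in 2a81ada.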

         mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
         if (!mhi_cntrl->wq) {
                 ret = -ENOMEM;
                 goto err_free_cmd;
         }

         INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
         INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+        spin_lock_init(&mhi_cntrl->state_lock);
         spin_lock_init(&mhi_cntrl->list_lock);
-        mutex_init(&mhi_cntrl->state_lock);
         mutex_init(&mhi_cntrl->event_lock);

         /* Set MHI version and AMSS EE before enumeration */
         mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
         mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

         /* Set controller index */
         ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);

--- 144 unchanged lines hidden ---
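In the final hunk, the uevent callback takes a const struct device * in the 2a81ada revision, apparently tracking the driver core's const-ified bus uevent() prototype; the 1ddc761 revision keeps the older non-const signature. mhi_ep_match() is identical in both.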

 EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);

 void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
 {
         driver_unregister(&mhi_drv->driver);
 }
 EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

-static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
 {
-        struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+        const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

         return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
                               mhi_dev->name);
 }

 static int mhi_ep_match(struct device *dev, struct device_driver *drv)
 {
         struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

--- 42 unchanged lines hidden ---