/* SPDX-License-Identifier: GPL-2.0 */

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */
#ifndef _GSI_H_
#define _GSI_H_

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "ipa_version.h"

/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX	23
#define GSI_EVT_RING_COUNT_MAX	24

/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
#define GSI_TLV_MAX		64

struct device;
struct scatterlist;
struct platform_device;

struct gsi;
struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;

/* Execution environment IDs */
enum gsi_ee_id {
	GSI_EE_AP	= 0x0,
	GSI_EE_MODEM	= 0x1,
	GSI_EE_UC	= 0x2,
	GSI_EE_TZ	= 0x3,
};

/* A ring used either for channel TREs or for hardware events */
struct gsi_ring {
	void *virt;			/* ring array base address */
	dma_addr_t addr;		/* primarily low 32 bits used */
	u32 count;			/* number of elements in ring */

	/* The ring index value indicates the next "open" entry in the ring.
	 *
	 * A channel ring consists of TRE entries filled by the AP and passed
	 * to the hardware for processing.  For a channel ring, the ring index
	 * identifies the next unused entry to be filled by the AP.  In this
	 * case the initial value is assumed by hardware to be 0.
	 *
	 * An event ring consists of event structures filled by the hardware
	 * and passed to the AP.  For event rings, the ring index identifies
	 * the next ring entry that is not known to have been filled by the
	 * hardware.  The initial value used is arbitrary (so we use 0).
	 */
	u32 index;
};

/* Transactions use several resources that can be allocated dynamically
 * but taken from a fixed-size pool.  The number of elements required for
 * the pool is limited by the total number of TREs that can be outstanding.
 *
 * If sufficient TREs are available to reserve for a transaction,
 * allocation from these pools is guaranteed to succeed.  Furthermore,
 * these resources are implicitly freed whenever the TREs in the
 * transaction they're associated with are released.
 *
 * The result of a pool allocation of multiple elements is always
 * contiguous.
 */
struct gsi_trans_pool {
	void *base;			/* base address of element pool */
	u32 count;			/* # elements in the pool */
	u32 free;			/* next free element in pool (modulo) */
	u32 size;			/* size (bytes) of an element */
	u32 max_alloc;			/* max allocation request */
	dma_addr_t addr;		/* DMA address if DMA pool (or 0) */
};

/* Per-channel transaction bookkeeping */
struct gsi_trans_info {
	atomic_t tre_avail;		/* TREs available for allocation */

	u16 free_id;			/* first free trans in array */
	u16 allocated_id;		/* first allocated transaction */
	u16 committed_id;		/* first committed transaction */
	u16 pending_id;			/* first pending transaction */
	u16 completed_id;		/* first completed transaction */
	u16 polled_id;			/* first polled transaction */
	struct gsi_trans *trans;	/* transaction array */
	struct gsi_trans **map;		/* TRE -> transaction map */

	struct gsi_trans_pool sg_pool;	/* scatterlist pool */
	struct gsi_trans_pool cmd_pool;	/* command payload DMA pool */

	spinlock_t spinlock;		/* protects updates to the lists */
	struct list_head alloc;		/* allocated, not committed */
	struct list_head committed;	/* committed, awaiting doorbell */
	struct list_head pending;	/* pending, awaiting completion */
	struct list_head complete;	/* completed, awaiting poll */
	struct list_head polled;	/* returned by gsi_channel_poll_one() */
};

/* Hardware values signifying the state of a channel */
enum gsi_channel_state {
	GSI_CHANNEL_STATE_NOT_ALLOCATED		= 0x0,
	GSI_CHANNEL_STATE_ALLOCATED		= 0x1,
	GSI_CHANNEL_STATE_STARTED		= 0x2,
	GSI_CHANNEL_STATE_STOPPED		= 0x3,
	GSI_CHANNEL_STATE_STOP_IN_PROC		= 0x4,
	GSI_CHANNEL_STATE_FLOW_CONTROLLED	= 0x5,	/* IPA v4.2-v4.9 */
	GSI_CHANNEL_STATE_ERROR			= 0xf,
};

/* We only care about channels between IPA and AP */
struct gsi_channel {
	struct gsi *gsi;
	bool toward_ipa;		/* transfer direction (presumably TX if true) */
	bool command;			/* AP command TX channel or not */

	u8 trans_tre_max;		/* max TREs in a transaction */
	u16 tre_count;			/* # entries in the TRE ring */
	u16 event_count;		/* # entries in the event ring */

	struct gsi_ring tre_ring;
	u32 evt_ring_id;		/* event ring used by this channel */

	/* The following counts are used only for TX endpoints */
	u64 byte_count;			/* total # bytes transferred */
	u64 trans_count;		/* total # transactions */
	u64 queued_byte_count;		/* last reported queued byte count */
	u64 queued_trans_count;		/* ...and queued trans count */
	u64 compl_byte_count;		/* last reported completed byte count */
	u64 compl_trans_count;		/* ...and completed trans count */

	struct gsi_trans_info trans_info;

	struct napi_struct napi;
};

/* Hardware values signifying the state of an event ring */
enum gsi_evt_ring_state {
	GSI_EVT_RING_STATE_NOT_ALLOCATED	= 0x0,
	GSI_EVT_RING_STATE_ALLOCATED		= 0x1,
	GSI_EVT_RING_STATE_ERROR		= 0xf,
};

/* An event ring and the channel (if any) it is associated with */
struct gsi_evt_ring {
	struct gsi_channel *channel;
	struct gsi_ring ring;
};

/* Top-level GSI state, embedded in the IPA structure */
struct gsi {
	struct device *dev;		/* Same as IPA device */
	enum ipa_version version;
	void __iomem *virt_raw;		/* I/O mapped address range */
	void __iomem *virt;		/* Adjusted for most registers */
	u32 irq;
	u32 channel_count;
	u32 evt_ring_count;
	u32 event_bitmap;		/* allocated event rings */
	u32 modem_channel_bitmap;	/* modem channels to allocate */
	u32 type_enabled_bitmap;	/* GSI IRQ types enabled */
	u32 ieob_enabled_bitmap;	/* IEOB IRQ enabled (event rings) */
	int result;			/* Negative errno (generic commands) */
	struct completion completion;	/* Signals GSI command completion */
	struct mutex mutex;		/* protects commands, programming */
	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
	struct net_device dummy_dev;	/* needed for NAPI */
};

/**
 * gsi_setup() - Set up the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 *
 * Return:	0 if successful, or a negative error code
 *
 * Performs initialization that must wait until the GSI hardware is
 * ready (including firmware loaded).
 */
int gsi_setup(struct gsi *gsi);

/**
 * gsi_teardown() - Tear down GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_setup() call
 */
void gsi_teardown(struct gsi *gsi);

/**
 * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
 * @gsi:	GSI pointer
 * @channel_id:	Channel whose limit is to be returned
 *
 * Return:	The maximum number of TREs outstanding on the channel
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_start() - Start an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to start
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_start(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_stop() - Stop a started GSI channel
 * @gsi:	GSI pointer returned by gsi_setup()
 * @channel_id:	Channel to stop
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);

/**
 * gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+)
 * @gsi:	GSI pointer returned by gsi_setup()
 * @channel_id:	Modem TX channel to control
 * @enable:	Whether to enable flow control (i.e., prevent flow)
 */
void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
				    bool enable);

/**
 * gsi_channel_reset() - Reset an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to be reset
 * @doorbell:	Whether to (possibly) enable the doorbell engine
 *
 * Reset a channel and reconfigure it.  The @doorbell flag indicates
 * that the doorbell engine should be enabled if needed.
 *
 * GSI hardware relinquishes ownership of all pending receive buffer
 * transactions and they will complete with their cancelled flag set.
 */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);

/**
 * gsi_suspend() - Prepare the GSI subsystem for suspend
 * @gsi:	GSI pointer
 */
void gsi_suspend(struct gsi *gsi);

/**
 * gsi_resume() - Resume the GSI subsystem following suspend
 * @gsi:	GSI pointer
 */
void gsi_resume(struct gsi *gsi);

/**
 * gsi_channel_suspend() - Suspend a GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to suspend
 *
 * For IPA v4.0+, suspend is implemented by stopping the channel.
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_resume() - Resume a suspended GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to resume
 *
 * For IPA v4.0+, the stopped channel is started again.
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id);

/**
 * gsi_init() - Initialize the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 * @pdev:	IPA platform device
 * @version:	IPA hardware version (implies GSI version)
 * @count:	Number of entries in the configuration data array
 * @data:	Endpoint and channel configuration data
 *
 * Return:	0 if successful, or a negative error code
 *
 * Early stage initialization of the GSI subsystem, performing tasks
 * that can be done before the GSI hardware is ready to use.
 */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data);

/**
 * gsi_exit() - Exit the GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_init() call
 */
void gsi_exit(struct gsi *gsi);

#endif /* _GSI_H_ */