/* SPDX-License-Identifier: GPL-2.0 */

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */
#ifndef _GSI_H_
#define _GSI_H_

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX	17
#define GSI_EVT_RING_COUNT_MAX	13

/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
#define GSI_TLV_MAX		64

struct device;
struct scatterlist;
struct platform_device;

struct gsi;
struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;

/* Execution environment IDs */
enum gsi_ee_id {
	GSI_EE_AP	= 0,
	GSI_EE_MODEM	= 1,
	GSI_EE_UC	= 2,
	GSI_EE_TZ	= 3,
};

struct gsi_ring {
	void *virt;			/* ring array base address */
	dma_addr_t addr;		/* primarily low 32 bits used */
	u32 count;			/* number of elements in ring */

	/* The ring index value indicates the next "open" entry in the ring.
	 *
	 * A channel ring consists of TRE entries filled by the AP and passed
	 * to the hardware for processing.  For a channel ring, the ring index
	 * identifies the next unused entry to be filled by the AP.
	 *
	 * An event ring consists of event structures filled by the hardware
	 * and passed to the AP.  For event rings, the ring index identifies
	 * the next ring entry that is not known to have been filled by the
	 * hardware.
	 */
	u32 index;
};
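
/* The helper below is an illustrative sketch only and is not part of the
 * driver API.  It shows how a ring index can be mapped to a slot address,
 * assuming the index is interpreted modulo the number of ring elements
 * (as the transaction pool's free index, described below, is); the element
 * size is a caller-supplied placeholder.
 */
static inline void *gsi_ring_slot_example(const struct gsi_ring *ring,
					  u32 index, u32 elem_size)
{
	/* Wrap the index into the ring, then scale by the element size */
	return ring->virt + (index % ring->count) * elem_size;
}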

/* Transactions use several resources that can be allocated dynamically
 * but taken from a fixed-size pool.  The number of elements required for
 * the pool is limited by the total number of TREs that can be outstanding.
 *
 * If sufficient TREs are available to reserve for a transaction,
 * allocation from these pools is guaranteed to succeed.  Furthermore,
 * these resources are implicitly freed whenever the TREs in the
 * transaction they're associated with are released.
 *
 * The result of a pool allocation of multiple elements is always
 * contiguous.
 */
struct gsi_trans_pool {
	void *base;			/* base address of element pool */
	u32 count;			/* # elements in the pool */
	u32 free;			/* next free element in pool (modulo) */
	u32 size;			/* size (bytes) of an element */
	u32 max_alloc;			/* max allocation request */
	dma_addr_t addr;		/* DMA address if DMA pool (or 0) */
};

struct gsi_trans_info {
	atomic_t tre_avail;		/* TREs available for allocation */
	struct gsi_trans_pool pool;	/* transaction pool */
	struct gsi_trans_pool sg_pool;	/* scatterlist pool */
	struct gsi_trans_pool cmd_pool;	/* command payload DMA pool */
	struct gsi_trans_pool info_pool;/* command information pool */
	struct gsi_trans **map;		/* TRE -> transaction map */

	spinlock_t spinlock;		/* protects updates to the lists */
	struct list_head alloc;		/* allocated, not committed */
	struct list_head pending;	/* committed, awaiting completion */
	struct list_head complete;	/* completed, awaiting poll */
	struct list_head polled;	/* returned by gsi_channel_poll_one() */
};

/* Hardware values signifying the state of a channel */
enum gsi_channel_state {
	GSI_CHANNEL_STATE_NOT_ALLOCATED	= 0x0,
	GSI_CHANNEL_STATE_ALLOCATED	= 0x1,
	GSI_CHANNEL_STATE_STARTED	= 0x2,
	GSI_CHANNEL_STATE_STOPPED	= 0x3,
	GSI_CHANNEL_STATE_STOP_IN_PROC	= 0x4,
	GSI_CHANNEL_STATE_ERROR		= 0xf,
};

/* We only care about channels between IPA and AP */
struct gsi_channel {
	struct gsi *gsi;
	bool toward_ipa;
	bool command;			/* AP command TX channel or not */
	bool use_prefetch;		/* use prefetch (else escape buf) */

	u8 tlv_count;			/* # entries in TLV FIFO */
	u16 tre_count;
	u16 event_count;

	struct completion completion;	/* signals channel command completion */

	struct gsi_ring tre_ring;
	u32 evt_ring_id;

	u64 byte_count;			/* total # bytes transferred */
	u64 trans_count;		/* total # transactions */
	/* The following counts are used only for TX endpoints */
	u64 queued_byte_count;		/* last reported queued byte count */
	u64 queued_trans_count;		/* ...and queued trans count */
	u64 compl_byte_count;		/* last reported completed byte count */
	u64 compl_trans_count;		/* ...and completed trans count */

	struct gsi_trans_info trans_info;

	struct napi_struct napi;
};

/* Hardware values signifying the state of an event ring */
enum gsi_evt_ring_state {
	GSI_EVT_RING_STATE_NOT_ALLOCATED	= 0x0,
	GSI_EVT_RING_STATE_ALLOCATED		= 0x1,
	GSI_EVT_RING_STATE_ERROR		= 0xf,
};

struct gsi_evt_ring {
	struct gsi_channel *channel;
	struct completion completion;	/* signals event ring state changes */
	enum gsi_evt_ring_state state;
	struct gsi_ring ring;
};

struct gsi {
	struct device *dev;		/* Same as IPA device */
	struct net_device dummy_dev;	/* needed for NAPI */
	void __iomem *virt;
	u32 irq;
	bool irq_wake_enabled;
	u32 channel_count;
	u32 evt_ring_count;
	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
	u32 event_bitmap;
	u32 event_enable_bitmap;
	u32 modem_channel_bitmap;
	struct completion completion;	/* for global EE commands */
	struct mutex mutex;		/* protects commands, programming */
};

/**
 * gsi_setup() - Set up the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 * @legacy:	Set up for legacy hardware
 *
 * Return:	0 if successful, or a negative error code
 *
 * Performs initialization that must wait until the GSI hardware is
 * ready (including firmware loaded).
 */
int gsi_setup(struct gsi *gsi, bool legacy);

/**
 * gsi_teardown() - Tear down GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_setup() call
 */
void gsi_teardown(struct gsi *gsi);

/**
 * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
 * @gsi:	GSI pointer
 * @channel_id:	Channel whose limit is to be returned
 *
 * Return:	The maximum number of TREs outstanding on the channel
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
 * @gsi:	GSI pointer
 * @channel_id:	Channel whose limit is to be returned
 *
 * Return:	The maximum TRE count per transaction on the channel
 */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_start() - Start an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to start
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_start(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_stop() - Stop a started GSI channel
 * @gsi:	GSI pointer returned by gsi_setup()
 * @channel_id:	Channel to stop
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
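
/* Illustrative sketch only, not part of the driver: the expected ordering
 * is gsi_init() then gsi_setup() before any channel is started, and
 * gsi_teardown()/gsi_exit() only after all channels have been stopped.
 * The hypothetical helper below just shows how the start/stop calls above
 * pair up, restarting a started channel and propagating any error.
 */
static inline int gsi_channel_restart_example(struct gsi *gsi, u32 channel_id)
{
	int ret;

	/* A started channel must be stopped before it can be started again */
	ret = gsi_channel_stop(gsi, channel_id);
	if (ret)
		return ret;

	return gsi_channel_start(gsi, channel_id);
}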

/**
 * gsi_channel_reset() - Reset an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to be reset
 * @legacy:	Legacy behavior
 *
 * Reset a channel and reconfigure it.  The @legacy flag indicates
 * that some steps should be done differently for legacy hardware.
 *
 * GSI hardware relinquishes ownership of all pending receive buffer
 * transactions and they will complete with their cancelled flag set.
 */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);

/* Suspend or resume a started channel.  If @stop (or @start) is true,
 * the channel is fully stopped (or restarted) rather than merely
 * suspended (or resumed), for hardware that requires it.
 */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);

/**
 * gsi_init() - Initialize the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 * @pdev:	IPA platform device
 * @prefetch:	Whether channels use TLV prefetch (else an escape buffer)
 * @count:	Number of entries in the @data array
 * @data:	Endpoint configuration data array
 * @modem_alloc:	Whether the AP allocates channels used by the modem
 *
 * Return:	0 if successful, or a negative error code
 *
 * Early stage initialization of the GSI subsystem, performing tasks
 * that can be done before the GSI hardware is ready to use.
 */
int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
	     u32 count, const struct ipa_gsi_endpoint_data *data,
	     bool modem_alloc);

/**
 * gsi_exit() - Exit the GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_init() call
 */
void gsi_exit(struct gsi *gsi);

#endif /* _GSI_H_ */