xref: /openbmc/linux/drivers/net/ipa/ipa_endpoint.h (revision 83b975b5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2019-2022 Linaro Ltd.
5  */
6 #ifndef _IPA_ENDPOINT_H_
7 #define _IPA_ENDPOINT_H_
8 
9 #include <linux/types.h>
10 #include <linux/workqueue.h>
11 #include <linux/if_ether.h>
12 
13 #include "gsi.h"
14 #include "ipa_reg.h"
15 
16 struct net_device;
17 struct sk_buff;
18 
19 struct ipa;
20 struct ipa_gsi_endpoint_data;
21 
/* Non-zero granularity of counter used to implement aggregation timeout */
#define IPA_AGGR_GRANULARITY		500	/* microseconds */

/* IPA packet MTU is the standard Ethernet payload size */
#define IPA_MTU			ETH_DATA_LEN
26 
/* Names of the IPA endpoints known to the AP driver.  Each name encodes
 * the execution environment the endpoint serves (AP or modem) and the
 * direction of data flow (TX toward the IPA hardware, RX away from it).
 */
enum ipa_endpoint_name {
	IPA_ENDPOINT_AP_COMMAND_TX,
	IPA_ENDPOINT_AP_LAN_RX,
	IPA_ENDPOINT_AP_MODEM_TX,
	IPA_ENDPOINT_AP_MODEM_RX,
	IPA_ENDPOINT_MODEM_COMMAND_TX,
	IPA_ENDPOINT_MODEM_LAN_TX,
	IPA_ENDPOINT_MODEM_LAN_RX,
	IPA_ENDPOINT_MODEM_AP_TX,
	IPA_ENDPOINT_MODEM_AP_RX,
	IPA_ENDPOINT_MODEM_DL_NLO_TX,
	IPA_ENDPOINT_COUNT,	/* Number of names (not an index) */
};

#define IPA_ENDPOINT_MAX		32	/* Max supported by driver */
42 
/**
 * struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
 * @seq_type:		primary packet processing sequencer type
 * @seq_rep_type:	sequencer type for replication processing
 * @status_endpoint:	endpoint to which status elements are sent
 *
 * The @status_endpoint is only valid if the endpoint's @status_enable
 * flag (in struct ipa_endpoint_config) is set.
 */
struct ipa_endpoint_tx {
	enum ipa_seq_type seq_type;
	enum ipa_seq_rep_type seq_rep_type;
	enum ipa_endpoint_name status_endpoint;
};
57 
/**
 * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints
 * @buffer_size:	requested receive buffer size (bytes)
 * @pad_align:		power-of-2 boundary to which packet payload is aligned
 * @aggr_time_limit:	time before aggregation closes (microseconds)
 * @aggr_hard_limit:	whether aggregation closes before or after boundary
 * @aggr_close_eof:	whether aggregation closes on end-of-frame
 * @holb_drop:		whether to drop packets to avoid head-of-line blocking
 *
 * The actual size of the receive buffer is rounded up if necessary
 * to be a power-of-2 number of pages.
 *
 * With each packet it transfers, the IPA hardware can perform certain
 * transformations of its packet data.  One of these is adding pad bytes
 * to the end of the packet data so the result ends on a power-of-2 boundary.
 *
 * It is also able to aggregate multiple packets into a single receive buffer.
 * Aggregation is "open" while a buffer is being filled, and "closes" when
 * certain criteria are met.
 *
 * A time limit can be specified to close aggregation.  Aggregation will be
 * closed if this period passes after data is first written into a receive
 * buffer.  If not specified, no time limit is imposed.
 *
 * Insufficient space available in the receive buffer can close aggregation.
 * The aggregation byte limit defines the point (in units of 1024 bytes) in
 * the buffer where aggregation closes.  With a "soft" aggregation limit,
 * aggregation closes when a packet written to the buffer *crosses* that
 * aggregation limit.  With a "hard" aggregation limit, aggregation will
 * close *before* writing a packet that would cross that boundary.
 *
 * Finally, if @aggr_close_eof is set, an end-of-frame condition also
 * closes aggregation.
 */
struct ipa_endpoint_rx {
	u32 buffer_size;
	u32 pad_align;
	u32 aggr_time_limit;
	bool aggr_hard_limit;
	bool aggr_close_eof;
	bool holb_drop;
};
97 
/**
 * struct ipa_endpoint_config - IPA endpoint hardware configuration
 * @resource_group:	resource group to assign endpoint to
 * @checksum:		whether checksum offload is enabled
 * @qmap:		whether endpoint uses QMAP protocol
 * @aggregation:	whether endpoint supports aggregation
 * @status_enable:	whether endpoint uses status elements
 * @dma_mode:		whether endpoint operates in DMA mode
 * @dma_endpoint:	peer endpoint, if operating in DMA mode
 * @tx:			TX-specific endpoint information (see above)
 * @rx:			RX-specific endpoint information (see above)
 */
struct ipa_endpoint_config {
	u32 resource_group;
	bool checksum;
	bool qmap;
	bool aggregation;
	bool status_enable;
	bool dma_mode;
	enum ipa_endpoint_name dma_endpoint;
	/* Direction-specific configuration; which member applies depends
	 * on the endpoint's direction (TX toward IPA, or RX)
	 */
	union {
		struct ipa_endpoint_tx tx;
		struct ipa_endpoint_rx rx;
	};
};
123 
/**
 * enum ipa_replenish_flag - RX buffer replenish flags
 *
 * @IPA_REPLENISH_ENABLED:	Whether receive buffer replenishing is enabled
 * @IPA_REPLENISH_ACTIVE:	Whether replenishing is underway
 * @IPA_REPLENISH_COUNT:	Number of defined replenish flags
 */
enum ipa_replenish_flag {
	IPA_REPLENISH_ENABLED,
	IPA_REPLENISH_ACTIVE,
	IPA_REPLENISH_COUNT,	/* Number of flags (must be last) */
};
136 
/**
 * struct ipa_endpoint - IPA endpoint information
 * @ipa:		IPA pointer
 * @ee_id:		Execution environment endpoint is associated with
 * @channel_id:		GSI channel used by the endpoint
 * @endpoint_id:	IPA endpoint number
 * @toward_ipa:		Endpoint direction (true = TX, false = RX)
 * @config:		Default endpoint configuration
 * @skb_frag_max:	Maximum allowed number of TX SKB fragments
 * @evt_ring_id:	GSI event ring used by the endpoint
 * @netdev:		Network device pointer, if endpoint uses one
 * @replenish_flags:	Replenishing state flags
 * @replenish_count:	Total number of replenish transactions committed
 * @replenish_work:	Work item used for repeated replenish failures
 */
struct ipa_endpoint {
	struct ipa *ipa;
	enum gsi_ee_id ee_id;
	u32 channel_id;
	u32 endpoint_id;
	bool toward_ipa;
	struct ipa_endpoint_config config;

	u32 skb_frag_max;	/* Used for netdev TX only */
	u32 evt_ring_id;

	/* Net device this endpoint is associated with, if any */
	struct net_device *netdev;

	/* Receive buffer replenishing for RX endpoints */
	DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
	u64 replenish_count;
	struct delayed_work replenish_work;		/* global wq */
};
171 
/* Operations applied to all modem endpoints */
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa);

void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable);

int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);

/* Transmit a socket buffer on an endpoint */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);

/* Enable/disable and suspend/resume a single endpoint */
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint);
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint);

/* Suspend/resume all of an IPA's endpoints */
void ipa_endpoint_suspend(struct ipa *ipa);
void ipa_endpoint_resume(struct ipa *ipa);

/* Paired setup/teardown and config/deconfig operations */
void ipa_endpoint_setup(struct ipa *ipa);
void ipa_endpoint_teardown(struct ipa *ipa);

int ipa_endpoint_config(struct ipa *ipa);
void ipa_endpoint_deconfig(struct ipa *ipa);

/* Set or clear the default route */
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
void ipa_endpoint_default_route_clear(struct ipa *ipa);

/* Initialize/release endpoint state for an IPA instance.
 * NOTE(review): the u32 return of ipa_endpoint_init() looks like a
 * bitmask of endpoints (cf. IPA_ENDPOINT_MAX of 32) — confirm against
 * ipa_endpoint.c before relying on that.
 */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data);
void ipa_endpoint_exit(struct ipa *ipa);
201 
/* Transaction completion and release hooks for an endpoint.  The first
 * parameter is an endpoint, so name it "endpoint" — every other prototype
 * in this header uses that name for a struct ipa_endpoint pointer; the
 * previous name "ipa" wrongly suggested a struct ipa pointer.
 */
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans);
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans);
206 
207 #endif /* _IPA_ENDPOINT_H_ */
208