/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
 * Copyright(c) 2015-2017 Intel Corporation.
 */

#ifndef _PIO_H
#define _PIO_H
/* send context types */
#define SC_KERNEL 0
#define SC_VL15   1
#define SC_ACK    2
#define SC_USER   3	/* must be the last one: it may take all that is left */
#define SC_MAX    4	/* count of send context types */

/* invalid send context index */
#define INVALID_SCI 0xff

/* PIO buffer release callback function */
typedef void (*pio_release_cb)(void *arg, int code);

/* PIO release codes - in bits, as more than one may apply */
#define PRC_OK		0	/* no known error */
#define PRC_STATUS_ERR	0x01	/* credit return due to status error */
#define PRC_PBC		0x02	/* credit return due to PBC */
#define PRC_THRESHOLD	0x04	/* credit return due to threshold */
#define PRC_FILL_ERR	0x08	/* credit return due to fill error */
#define PRC_FORCE	0x10	/* credit return due to credit force */
#define PRC_SC_DISABLE	0x20	/* clean-up after a context disable */
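
/*
 * Example (illustrative only, not part of the driver API): a release
 * callback registered through sc_buffer_alloc() can test the PRC_*
 * bits to tell a normal credit return from an error or a context
 * disable.  The names my_pio_complete/my_request are hypothetical.
 *
 *	static void my_pio_complete(void *arg, int code)
 *	{
 *		struct my_request *req = arg;
 *
 *		if (code & (PRC_STATUS_ERR | PRC_FILL_ERR))
 *			my_request_error(req);
 *		else
 *			my_request_done(req);
 *	}
 */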

/* byte helper: lets a 64-bit value be accessed as dwords or bytes */
union mix {
	u64 val64;
	u32 val32[2];
	u8  val8[8];
};

/* an allocated PIO buffer */
struct pio_buf {
	struct send_context *sc;/* back pointer to owning send context */
	pio_release_cb cb;	/* called when the buffer is released */
	void *arg;		/* argument for cb */
	void __iomem *start;	/* buffer start address */
	void __iomem *end;	/* context end address */
	unsigned long sent_at;	/* buffer is sent when <= free */
	union mix carry;	/* pending unwritten bytes */
	u16 qw_written;		/* QW written so far */
	u8 carry_bytes;	/* number of valid bytes in carry */
};

/* cache line aligned pio buffer array */
union pio_shadow_ring {
	struct pio_buf pbuf;
} ____cacheline_aligned;

/* per-NUMA send context */
struct send_context {
	/* read-only after init */
	struct hfi1_devdata *dd;		/* device */
	union pio_shadow_ring *sr;	/* shadow ring */
	void __iomem *base_addr;	/* start of PIO memory */
	u32 __percpu *buffers_allocated;/* count of buffers allocated */
	u32 size;			/* context size, in bytes */

	int node;			/* context home node */
	u32 sr_size;			/* size of the shadow ring */
	u16 flags;			/* flags */
	u8  type;			/* context type */
	u8  sw_index;			/* software index number */
	u8  hw_context;			/* hardware context number */
	u8  group;			/* credit return group */

	/* allocator fields */
	spinlock_t alloc_lock ____cacheline_aligned_in_smp;
	u32 sr_head;			/* shadow ring head */
	unsigned long fill;		/* official alloc count */
	unsigned long alloc_free;	/* copy of free (less cache thrash) */
	u32 fill_wrap;			/* tracks fill within ring */
	u32 credits;			/* number of blocks in context */
	/* adding a new field here would make it part of this cacheline */

	/* releaser fields */
	spinlock_t release_lock ____cacheline_aligned_in_smp;
	u32 sr_tail;			/* shadow ring tail */
	unsigned long free;		/* official free count */
	volatile __le64 *hw_free;	/* HW free counter */
	/* list for PIO waiters */
	struct list_head piowait  ____cacheline_aligned_in_smp;
	seqlock_t waitlock;

	spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
	u32 credit_intr_count;		/* count of credit intr users */
	u64 credit_ctrl;		/* cache for credit control */
	wait_queue_head_t halt_wait;    /* wait until kernel sees interrupt */
	struct work_struct halt_work;	/* halted context work queue entry */
};

/* send context flags */
#define SCF_ENABLED 0x01
#define SCF_IN_FREE 0x02
#define SCF_HALTED  0x04
#define SCF_FROZEN  0x08
#define SCF_LINK_DOWN 0x10

struct send_context_info {
	struct send_context *sc;	/* allocated working context */
	u16 allocated;			/* has this been allocated? */
	u16 type;			/* context type */
	u16 base;			/* base in PIO array */
	u16 credits;			/* size in PIO array */
};

/* DMA credit return, index is always (context & 0x7) */
struct credit_return {
	volatile __le64 cr[8];
};

/* NUMA indexed credit return array */
struct credit_return_base {
	struct credit_return *va;
	dma_addr_t dma;
};

/* send context configuration sizes (one per type) */
struct sc_config_sizes {
	short int size;
	short int count;
};

/*
 * The diagram below details the relationship of the mapping structures.
 *
 * Since the mapping now allows for non-uniform send contexts per vl, the
 * number of send contexts for a vl is either vl_scontexts[vl] or
 * a computation based on num_kernel_send_contexts/num_vls:
 *
 * For example:
 * nactual = vl_scontexts ? vl_scontexts[vl] : num_kernel_send_contexts/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_kernel_send_contexts/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
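 *
 * A worked example with illustrative numbers: with 16 kernel send
 * contexts and 5 vls, nactual is 3 for vls 0-3 and 4 for vl 4 (the one
 * extra lands on the last vl), n rounds up to 4 for every vl, and each
 * map entry then holds a 4-entry ksc[] array with a mask of 3.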
 *
 * For the case where n > nactual, the send contexts are assigned
 * in a round robin fashion wrapping back to the first send context
 * for a particular vl.
 *
 *               dd->pio_map
 *                    |                                   pio_map_elem[0]
 *                    |                                +--------------------+
 *                    v                                |       mask         |
 *               pio_vl_map                            |--------------------|
 *      +--------------------------+                   | ksc[0] -> sc 1     |
 *      |    list (RCU)            |                   |--------------------|
 *      |--------------------------|                 ->| ksc[1] -> sc 2     |
 *      |    mask                  |              --/  |--------------------|
 *      |--------------------------|            -/     |        *           |
 *      |    actual_vls (max 8)    |          -/       |--------------------|
 *      |--------------------------|       --/         | ksc[n-1] -> sc n   |
 *      |    vls (max 8)           |     -/            +--------------------+
 *      |--------------------------|  --/
 *      |    map[0]                |-/
 *      |--------------------------|                   +--------------------+
 *      |    map[1]                |---                |       mask         |
 *      |--------------------------|   \----           |--------------------|
 *      |           *              |        \--        | ksc[0] -> sc 1+n   |
 *      |           *              |           \----   |--------------------|
 *      |           *              |                \->| ksc[1] -> sc 2+n   |
 *      |--------------------------|                   |--------------------|
 *      |   map[vls - 1]           |-                  |         *          |
 *      +--------------------------+ \-                |--------------------|
 *                                     \-              | ksc[m-1] -> sc m+n |
 *                                       \             +--------------------+
 *                                        \-
 *                                          \
 *                                           \-        +----------------------+
 *                                             \-      |       mask           |
 *                                               \     |----------------------|
 *                                                \-   | ksc[0] -> sc 1+m+n   |
 *                                                  \- |----------------------|
 *                                                    >| ksc[1] -> sc 2+m+n   |
 *                                                     |----------------------|
 *                                                     |         *            |
 *                                                     |----------------------|
 *                                                     | ksc[o-1] -> sc o+m+n |
 *                                                     +----------------------+
 *
 */

/* Initial number of send contexts per VL */
#define INIT_SC_PER_VL 2

/*
 * struct pio_map_elem - mapping for a vl
 * @mask - selector mask
 * @ksc - array of kernel send contexts for this vl
 *
 * The mask is used to "mod" the selector to
 * produce an index into the trailing array of
 * kscs.
 */
struct pio_map_elem {
	u32 mask;
	struct send_context *ksc[];
};

/*
 * struct pio_vl_map - mapping across vls
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index into the map array
 * @actual_vls - number of vls
 * @vls - number of vls rounded to next power of 2
 * @map - array of pio_map_elem entries
 *
 * This is the parent mapping structure.  The trailing map[] array points
 * to pio_map_elem entries, which in turn point to an array of kscs for
 * that vl.
 */
struct pio_vl_map {
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct pio_map_elem *map[];
};
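
/*
 * Conceptually, pio_select_send_context_vl() below performs a two-level
 * lookup through these structures.  A minimal sketch (RCU locking and
 * error handling live in pio.c; this is illustrative only):
 *
 *	m = rcu_dereference(dd->pio_map);
 *	e = m->map[vl & m->mask];
 *	sc = e->ksc[selector & e->mask];
 */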

int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,
		 u8 *vl_scontexts);
void free_pio_map(struct hfi1_devdata *dd);
struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
						u32 selector, u8 vl);
struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
						u32 selector, u8 sc5);

/* send context functions */
int init_credit_return(struct hfi1_devdata *dd);
void free_credit_return(struct hfi1_devdata *dd);
int init_sc_pools_and_sizes(struct hfi1_devdata *dd);
int init_send_contexts(struct hfi1_devdata *dd);
int init_pervl_scs(struct hfi1_devdata *dd);
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
			      uint hdrqentsize, int numa);
void sc_free(struct send_context *sc);
int sc_enable(struct send_context *sc);
void sc_disable(struct send_context *sc);
int sc_restart(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_flush(struct send_context *sc);
void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
				pio_release_cb cb, void *arg);
void sc_release_update(struct send_context *sc);
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
void sc_add_credit_return_intr(struct send_context *sc);
void sc_del_credit_return_intr(struct send_context *sc);
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold);
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent);
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize);
void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint);
void sc_wait(struct hfi1_devdata *dd);
void set_pio_integrity(struct send_context *sc);
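
/*
 * Rough sketch of a kernel send context lifecycle (illustrative only;
 * real callers add error handling and credit management):
 *
 *	sc = sc_alloc(dd, SC_KERNEL, hdrqentsize, numa);
 *	sc_enable(sc);
 *	...
 *	pbuf = sc_buffer_alloc(sc, dw_len, release_cb, cb_arg);
 *	...
 *	sc_disable(sc);
 *	sc_free(sc);
 */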

/* support functions */
void pio_reset_all(struct hfi1_devdata *dd);
void pio_freeze(struct hfi1_devdata *dd);
void pio_kernel_unfreeze(struct hfi1_devdata *dd);
void pio_kernel_linkup(struct hfi1_devdata *dd);

/* global PIO send control operations */
#define PSC_GLOBAL_ENABLE 0
#define PSC_GLOBAL_DISABLE 1
#define PSC_GLOBAL_VLARB_ENABLE 2
#define PSC_GLOBAL_VLARB_DISABLE 3
#define PSC_CM_RESET 4
#define PSC_DATA_VL_ENABLE 5
#define PSC_DATA_VL_DISABLE 6

void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl);
void pio_send_control(struct hfi1_devdata *dd, int op);
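
/*
 * The PSC_* operations above are passed as the op argument to
 * pio_send_control(), e.g. pio_send_control(dd, PSC_GLOBAL_ENABLE);
 */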

/* PIO copy routines */
void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
	      const void *from, size_t count);
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
			const void *from, size_t nbytes);
void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
void seg_pio_copy_end(struct pio_buf *pbuf);
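
/*
 * Sketch of a segmented PIO write (illustrative only; hdr/payload and
 * their lengths are caller-owned placeholders, and error handling is
 * elided).  pio_copy() is the single-shot equivalent.
 *
 *	pbuf = sc_buffer_alloc(sc, dw_len, release_cb, cb_arg);
 *	seg_pio_copy_start(pbuf, pbc, hdr, hdr_bytes);
 *	seg_pio_copy_mid(pbuf, payload, payload_bytes);
 *	seg_pio_copy_end(pbuf);
 */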

void seqfile_dump_sci(struct seq_file *s, u32 i,
		      struct send_context_info *sci);

#endif /* _PIO_H */