/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

#include <linux/types.h>

/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe

#define IDXD_MMIO_BAR		0
#define IDXD_WQ_BAR		2
#define IDXD_PORTAL_SIZE	PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)
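
/*
 * Example (illustrative value): a VERSION register reading of 0x21 decodes
 * via the masks above to major version 2 and minor version 1.
 */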

union gen_cap_reg {
	struct {
		u64 block_on_fault:1;
		u64 overlap_copy:1;
		u64 cache_control_mem:1;
		u64 cache_control_cache:1;
		u64 cmd_cap:1;
		u64 rsvd:3;
		u64 dest_readback:1;
		u64 drain_readback:1;
		u64 rsvd2:6;
		u64 max_xfer_shift:5;
		u64 max_batch_shift:4;
		u64 max_ims_mult:6;
		u64 config_en:1;
		u64 max_descs_per_engine:8;
		u64 rsvd3:24;
	};
	u64 bits;
} __packed;
#define IDXD_GENCAP_OFFSET		0x10

union wq_cap_reg {
	struct {
		u64 total_wq_size:16;
		u64 num_wqs:8;
		u64 wqcfg_size:4;
		u64 rsvd:20;
		u64 shared_mode:1;
		u64 dedicated_mode:1;
		u64 wq_ats_support:1;
		u64 priority:1;
		u64 occupancy:1;
		u64 occupancy_int:1;
		u64 rsvd3:10;
	};
	u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5

union group_cap_reg {
	struct {
		u64 num_groups:8;
		u64 total_tokens:8;
		u64 token_en:1;
		u64 token_limit:1;
		u64 rsvd:46;
	};
	u64 bits;
} __packed;
#define IDXD_GRPCAP_OFFSET		0x30

union engine_cap_reg {
	struct {
		u64 num_engines:8;
		u64 rsvd:56;
	};
	u64 bits;
} __packed;

#define IDXD_ENGCAP_OFFSET		0x38

#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008
struct opcap {
	u64 bits[4];
};

#define IDXD_OPCAP_OFFSET		0x40

#define IDXD_TABLE_OFFSET		0x60
union offsets_reg {
	struct {
		u64 grpcfg:16;
		u64 wqcfg:16;
		u64 msix_perm:16;
		u64 ims:16;
		u64 perfmon:16;
		u64 rsvd:48;
	};
	u64 bits[2];
} __packed;

#define IDXD_TABLE_MULT			0x100

#define IDXD_GENCFG_OFFSET		0x80
union gencfg_reg {
	struct {
		u32 token_limit:8;
		u32 rsvd:4;
		u32 user_int_en:1;
		u32 rsvd2:19;
	};
	u32 bits;
} __packed;

#define IDXD_GENCTRL_OFFSET		0x88
union genctrl_reg {
	struct {
		u32 softerr_int_en:1;
		u32 halt_int_en:1;
		u32 rsvd:30;
	};
	u32 bits;
} __packed;

#define IDXD_GENSTATS_OFFSET		0x90
union gensts_reg {
	struct {
		u32 state:2;
		u32 reset_type:2;
		u32 rsvd:28;
	};
	u32 bits;
} __packed;

enum idxd_device_status_state {
	IDXD_DEVICE_STATE_DISABLED = 0,
	IDXD_DEVICE_STATE_ENABLED,
	IDXD_DEVICE_STATE_DRAIN,
	IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
	IDXD_DEVICE_RESET_SOFTWARE = 0,
	IDXD_DEVICE_RESET_FLR,
	IDXD_DEVICE_RESET_WARM,
	IDXD_DEVICE_RESET_COLD,
};

#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08

#define IDXD_CMD_OFFSET			0xa0
union idxd_command_reg {
	struct {
		u32 operand:20;
		u32 cmd:5;
		u32 rsvd:6;
		u32 int_req:1;
	};
	u32 bits;
} __packed;

enum idxd_cmd {
	IDXD_CMD_ENABLE_DEVICE = 1,
	IDXD_CMD_DISABLE_DEVICE,
	IDXD_CMD_DRAIN_ALL,
	IDXD_CMD_ABORT_ALL,
	IDXD_CMD_RESET_DEVICE,
	IDXD_CMD_ENABLE_WQ,
	IDXD_CMD_DISABLE_WQ,
	IDXD_CMD_DRAIN_WQ,
	IDXD_CMD_ABORT_WQ,
	IDXD_CMD_RESET_WQ,
	IDXD_CMD_DRAIN_PASID,
	IDXD_CMD_ABORT_PASID,
	IDXD_CMD_REQUEST_INT_HANDLE,
	IDXD_CMD_RELEASE_INT_HANDLE,
};

#define CMD_INT_HANDLE_IMS		0x10000

#define IDXD_CMDSTS_OFFSET		0xa8
union cmdsts_reg {
	struct {
		u8 err;
		u16 result;
		u8 rsvd:7;
		u8 active:1;
	};
	u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8
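
/*
 * Example (illustrative value): a CMDSTS reading of 0x80000000 has only the
 * active bit set, i.e. the last command is still in flight. Once active
 * clears, (bits & IDXD_CMDSTS_ERR_MASK) gives the error code (see
 * enum idxd_cmdsts_err below) and ((bits >> IDXD_CMDSTS_RES_SHIFT) & 0xffff)
 * gives the 16-bit result field.
 */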

enum idxd_cmdsts_err {
	IDXD_CMDSTS_SUCCESS = 0,
	IDXD_CMDSTS_INVAL_CMD,
	IDXD_CMDSTS_INVAL_WQIDX,
	IDXD_CMDSTS_HW_ERR,
	/* enable device errors */
	IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
	IDXD_CMDSTS_ERR_CONFIG,
	IDXD_CMDSTS_ERR_BUSMASTER_EN,
	IDXD_CMDSTS_ERR_PASID_INVAL,
	IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
	IDXD_CMDSTS_ERR_GRP_CONFIG,
	IDXD_CMDSTS_ERR_GRP_CONFIG2,
	IDXD_CMDSTS_ERR_GRP_CONFIG3,
	IDXD_CMDSTS_ERR_GRP_CONFIG4,
	/* enable wq errors */
	IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
	IDXD_CMDSTS_ERR_WQ_ENABLED,
	IDXD_CMDSTS_ERR_WQ_SIZE,
	IDXD_CMDSTS_ERR_WQ_PRIOR,
	IDXD_CMDSTS_ERR_WQ_MODE,
	IDXD_CMDSTS_ERR_BOF_EN,
	IDXD_CMDSTS_ERR_PASID_EN,
	IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
	IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
	/* disable device errors */
	IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
	/* disable WQ, drain WQ, abort WQ, reset WQ */
	IDXD_CMDSTS_ERR_DEV_NOT_EN,
	/* request interrupt handle */
	IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
	IDXD_CMDSTS_ERR_NO_HANDLE,
};

#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
union sw_err_reg {
	struct {
		u64 valid:1;
		u64 overflow:1;
		u64 desc_valid:1;
		u64 wq_idx_valid:1;
		u64 batch:1;
		u64 fault_rw:1;
		u64 priv:1;
		u64 rsvd:1;
		u64 error:8;
		u64 wq_idx:8;
		u64 rsvd2:8;
		u64 operation:8;
		u64 pasid:20;
		u64 rsvd3:4;

		u64 batch_idx:16;
		u64 rsvd4:16;
		u64 invalid_flags:32;

		u64 fault_addr;

		u64 rsvd5;
	};
	u64 bits[4];
} __packed;

union msix_perm {
	struct {
		u32 rsvd:2;
		u32 ignore:1;
		u32 pasid_en:1;
		u32 rsvd2:8;
		u32 pasid:20;
	};
	u32 bits;
} __packed;

union group_flags {
	struct {
		u32 tc_a:3;
		u32 tc_b:3;
		u32 rsvd:1;
		u32 use_token_limit:1;
		u32 tokens_reserved:8;
		u32 rsvd2:4;
		u32 tokens_allowed:8;
		u32 rsvd3:4;
	};
	u32 bits;
} __packed;

struct grpcfg {
	u64 wqs[4];
	u64 engines;
	union group_flags flags;
} __packed;

union wqcfg {
	struct {
		/* bytes 0-3 */
		u16 wq_size;
		u16 rsvd;

		/* bytes 4-7 */
		u16 wq_thresh;
		u16 rsvd1;

		/* bytes 8-11 */
		u32 mode:1;	/* shared or dedicated */
		u32 bof:1;	/* block on fault */
		u32 wq_ats_disable:1;
		u32 rsvd2:1;
		u32 priority:4;
		u32 pasid:20;
		u32 pasid_en:1;
		u32 priv:1;
		u32 rsvd3:2;

		/* bytes 12-15 */
		u32 max_xfer_shift:5;
		u32 max_batch_shift:4;
		u32 rsvd4:23;

		/* bytes 16-19 */
		u16 occupancy_inth;
		u16 occupancy_table_sel:1;
		u16 rsvd5:15;

		/* bytes 20-23 */
		u16 occupancy_limit;
		u16 occupancy_int_en:1;
		u16 rsvd6:15;

		/* bytes 24-27 */
		u16 occupancy;
		u16 occupancy_int:1;
		u16 rsvd7:12;
		u16 mode_support:1;
		u16 wq_state:2;

		/* bytes 28-31 */
		u32 rsvd8;
	};
	u32 bits[8];
} __packed;

#define WQCFG_PASID_IDX			2
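
/*
 * WQCFG_PASID_IDX selects the third 32-bit word of the WQCFG block
 * (bytes 8-11 above), which carries the pasid_en and pasid fields.
 */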

/*
 * This macro calculates the offset into the WQCFG register block.
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32-bit dword within the config register
 *
 * The WQCFG register space is divided into per-WQ blocks. The n index
 * selects the block for that particular wq. Each register within a block
 * is 32 bits, and ofs selects which 32-bit word to access.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({\
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
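
/*
 * Worked example (illustrative numbers): with wqcfg_offset = 0x500 and
 * wqcfg_size = 32, the second 32-bit word of WQ 2's config lives at
 * WQCFG_OFFSET(idxd, 2, 1) = 0x500 + 2 * 32 + 4 * 1 = 0x544, and
 * WQCFG_STRIDES(idxd) = 32 / 4 = 8 words per WQ.
 */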

#define GRPCFG_SIZE		64
#define GRPWQCFG_STRIDES	4

/*
 * This macro calculates the offset into the GRPCFG register block.
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64-bit qword within the group's WQ field
 *
 * The GRPCFG register space is divided into 64-byte per-group blocks
 * (GRPCFG_SIZE). The n index selects the block for that particular group.
 * The WQ field of each block is GRPWQCFG_STRIDES 64-bit words, and ofs
 * selects which word to access; the engine and flags fields follow at
 * fixed offsets within the block.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
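
/*
 * Worked example (illustrative number): with grpcfg_offset = 0x400, group 1's
 * GRPCFG block occupies 0x440-0x47f (GRPCFG_SIZE = 64). Its wqs[0..3] words
 * sit at GRPWQCFG_OFFSET(idxd, 1, 0..3) = 0x440/0x448/0x450/0x458, the
 * engines word at GRPENGCFG_OFFSET(idxd, 1) = 0x460 and the flags at
 * GRPFLGCFG_OFFSET(idxd, 1) = 0x468, matching the struct grpcfg layout above.
 */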

/* The following are the performance monitor registers */
#define IDXD_PERFCAP_OFFSET		0x0
union idxd_perfcap {
	struct {
		u64 num_perf_counter:6;
		u64 rsvd1:2;
		u64 counter_width:8;
		u64 num_event_category:4;
		u64 global_event_category:16;
		u64 filter:8;
		u64 rsvd2:8;
		u64 cap_per_counter:1;
		u64 writeable_counter:1;
		u64 counter_freeze:1;
		u64 overflow_interrupt:1;
		u64 rsvd3:8;
	};
	u64 bits;
} __packed;

#define IDXD_EVNTCAP_OFFSET		0x80
union idxd_evntcap {
	struct {
		u64 events:28;
		u64 rsvd:36;
	};
	u64 bits;
} __packed;

struct idxd_event {
	union {
		struct {
			u32 event_category:4;
			u32 events:28;
		};
		u32 val;
	};
} __packed;

#define IDXD_CNTRCAP_OFFSET		0x800
struct idxd_cntrcap {
	union {
		struct {
			u32 counter_width:8;
			u32 rsvd:20;
			u32 num_events:4;
		};
		u32 val;
	};
	struct idxd_event events[];
} __packed;

#define IDXD_PERFRST_OFFSET		0x10
union idxd_perfrst {
	struct {
		u32 perfrst_config:1;
		u32 perfrst_counter:1;
		u32 rsvd:30;
	};
	u32 val;
} __packed;

#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100
union idxd_cntrcfg {
	struct {
		u64 enable:1;
		u64 interrupt_ovf:1;
		u64 global_freeze_ovf:1;
		u64 rsvd1:5;
		u64 event_category:4;
		u64 rsvd2:20;
		u64 events:28;
		u64 rsvd3:4;
	};
	u64 val;
} __packed;

#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200
union idxd_cntrdata {
	struct {
		u64 event_count_value;
	};
	u64 val;
} __packed;

union event_cfg {
	struct {
		u64 event_cat:4;
		u64 event_enc:28;
	};
	u64 val;
} __packed;

union filter_cfg {
	struct {
		u64 wq:32;
		u64 tc:8;
		u64 pg_sz:4;
		u64 xfer_sz:8;
		u64 eng:8;
	};
	u64 val;
} __packed;

#endif