/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

#define QM_QNUM_V1			4096
#define QM_QNUM_V2			1024
#define QM_MAX_VFS_NUM_V2		63

/* qm user domain */
#define QM_ARUSER_M_CFG_1		0x100088
#define AXUSER_SNOOP_ENABLE		BIT(30)
#define AXUSER_CMD_TYPE			GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL		1
#define AXUSER_NS			BIT(6)
#define AXUSER_NO			BIT(5)
#define AXUSER_FP			BIT(4)
#define AXUSER_SSV			BIT(0)
#define AXUSER_BASE			(AXUSER_SNOOP_ENABLE |		\
					FIELD_PREP(AXUSER_CMD_TYPE,	\
					AXUSER_CMD_SMMU_NORMAL) |	\
					AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE		0x100090
#define ARUSER_M_CFG_ENABLE		0xfffffffe
#define QM_AWUSER_M_CFG_1		0x100098
#define QM_AWUSER_M_CFG_ENABLE		0x1000a0
#define AWUSER_M_CFG_ENABLE		0xfffffffe
#define QM_WUSER_M_CFG_ENABLE		0x1000a8
#define WUSER_M_CFG_ENABLE		0xffffffff

/* mailbox */
#define QM_MB_CMD_SQC			0x0
#define QM_MB_CMD_CQC			0x1
#define QM_MB_CMD_EQC			0x2
#define QM_MB_CMD_AEQC			0x3
#define QM_MB_CMD_SQC_BT		0x4
#define QM_MB_CMD_CQC_BT		0x5
#define QM_MB_CMD_SQC_VFT_V2		0x6
#define QM_MB_CMD_STOP_QP		0x8
#define QM_MB_CMD_SRC			0xc
#define QM_MB_CMD_DST			0xd

#define QM_MB_CMD_SEND_BASE		0x300
#define QM_MB_EVENT_SHIFT		8
#define QM_MB_BUSY_SHIFT		13
#define QM_MB_OP_SHIFT			14
#define QM_MB_CMD_DATA_ADDR_L		0x304
#define QM_MB_CMD_DATA_ADDR_H		0x308
#define QM_MB_MAX_WAIT_CNT		6000
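
/*
 * Note on the mailbox defines above (a simplified sketch, not a full
 * register description): hisi_qm_mb() builds a mailbox message whose
 * first word packs the command code in the low bits together with the
 * busy flag (QM_MB_BUSY_SHIFT) and the read/write flag (QM_MB_OP_SHIFT);
 * the remaining words carry the queue number and the DMA address of the
 * context buffer. hisi_qm_wait_mb_ready() polls the register at
 * QM_MB_CMD_SEND_BASE until the busy bit clears before a new command is
 * issued.
 */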

/* doorbell */
#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
#define QM_QP_MAX_NUM_SHIFT		11
#define QM_DB_CMD_SHIFT_V2		12
#define QM_DB_RAND_SHIFT_V2		16
#define QM_DB_INDEX_SHIFT_V2		32
#define QM_DB_PRIORITY_SHIFT_V2		48
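
/*
 * Rough shape of a V2+ doorbell write, kept here only as orientation for
 * the shift values above (a sketch of what the QM core does): the 64-bit
 * value written is
 *
 *	qn | (cmd << QM_DB_CMD_SHIFT_V2) | (index << QM_DB_INDEX_SHIFT_V2) |
 *	(priority << QM_DB_PRIORITY_SHIFT_V2)
 *
 * plus a random field at QM_DB_RAND_SHIFT_V2. SQ/CQ doorbells go to the
 * per-queue window at QM_DOORBELL_SQ_CQ_BASE_V2 relative to db_io_base,
 * EQ/AEQ doorbells to QM_DOORBELL_EQ_AEQ_BASE_V2 relative to io_base.
 */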
#define QM_VF_STATE			0x60

/* qm cache */
#define QM_CACHE_CTL			0x100050
#define SQC_CACHE_ENABLE		BIT(0)
#define CQC_CACHE_ENABLE		BIT(1)
#define SQC_CACHE_WB_ENABLE		BIT(4)
#define SQC_CACHE_WB_THRD		GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE		BIT(11)
#define CQC_CACHE_WB_THRD		GENMASK(17, 12)
#define QM_AXI_M_CFG			0x1000ac
#define AXI_M_CFG			0xffff
#define QM_AXI_M_CFG_ENABLE		0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS	0x300014
#define AXI_M_CFG_ENABLE		0xffffffff
#define QM_PEH_AXUSER_CFG		0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
#define PEH_AXUSER_CFG			0x401001
#define PEH_AXUSER_CFG_ENABLE		0xffffffff

#define QM_MIN_QNUM			2
#define HISI_ACC_SGL_SGE_NR_MAX		255
#define QM_SHAPER_CFG			0x100164
#define QM_SHAPER_ENABLE		BIT(30)
#define QM_SHAPER_TYPE1_OFFSET		10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR		1

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE		0 /* don't use uacce */
#define UACCE_MODE_SVA			1 /* use uacce sva mode */
#define UACCE_MODE_DESC	"0(default) means only register to crypto, 1 means both register to crypto and uacce"

enum qm_stop_reason {
	QM_NORMAL,
	QM_SOFT_RESET,
	QM_DOWN,
};

enum qm_state {
	QM_INIT = 0,
	QM_START,
	QM_CLOSE,
	QM_STOP,
};

enum qp_state {
	QP_INIT = 1,
	QP_START,
	QP_STOP,
	QP_CLOSE,
};

enum qm_hw_ver {
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
};
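
/*
 * The qm_hw_ver values match the PCI revision ID of the accelerator
 * function, which is how the hardware generation is detected (see
 * q_num_set() below, which compares pdev->revision against QM_HW_V1).
 */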

enum qm_fun_type {
	QM_HW_PF,
	QM_HW_VF,
};

enum qm_debug_file {
	CURRENT_QM,
	CURRENT_Q,
	CLEAR_ENABLE,
	DEBUG_FILE_NUM,
};

enum qm_vf_state {
	QM_READY = 0,
	QM_NOT_READY,
};

enum qm_misc_ctl_bits {
	QM_DRIVER_REMOVING = 0x0,
	QM_RST_SCHED,
	QM_RESETTING,
	QM_MODULE_PARAM,
};

enum qm_cap_bits {
	QM_SUPPORT_DB_ISOLATION = 0x0,
	QM_SUPPORT_FUNC_QOS,
	QM_SUPPORT_STOP_QP,
	QM_SUPPORT_MB_COMMAND,
	QM_SUPPORT_SVA_PREFETCH,
	QM_SUPPORT_RPM,
};

struct qm_dev_alg {
	u64 alg_msk;
	const char *alg;
};
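
/*
 * Illustrative only (the names and bit positions below are placeholders,
 * not from a real device): a driver pairs each algorithm name it can
 * expose through uacce with the capability bit that advertises it, then
 * passes the table to hisi_qm_set_algs() together with the device's
 * algorithm mask so the QM core can assemble the uacce "algorithms"
 * string.
 *
 *	static const struct qm_dev_alg example_dev_algs[] = {
 *		{ .alg_msk = BIT(0), .alg = "alg-a\n" },
 *		{ .alg_msk = BIT(1), .alg = "alg-b\n" },
 *	};
 *
 *	ret = hisi_qm_set_algs(qm, alg_msk, example_dev_algs,
 *			       ARRAY_SIZE(example_dev_algs));
 */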

struct dfx_diff_registers {
	u32 *regs;
	u32 reg_offset;
	u32 reg_len;
};

struct qm_dfx {
	atomic64_t err_irq_cnt;
	atomic64_t aeq_irq_cnt;
	atomic64_t abnormal_irq_cnt;
	atomic64_t create_qp_err_cnt;
	atomic64_t mb_err_cnt;
};

struct debugfs_file {
	enum qm_debug_file index;
	struct mutex lock;
	struct qm_debug *debug;
};

struct qm_debug {
	u32 curr_qm_qp_num;
	u32 sqe_mask_offset;
	u32 sqe_mask_len;
	struct qm_dfx dfx;
	struct dentry *debug_root;
	struct dentry *qm_d;
	struct debugfs_file files[DEBUG_FILE_NUM];
	unsigned int *qm_last_words;
	/* ACC engines recording last regs */
	unsigned int *last_words;
	struct dfx_diff_registers *qm_diff_regs;
	struct dfx_diff_registers *acc_diff_regs;
};

struct qm_shaper_factor {
	u32 func_qos;
	u64 cir_b;
	u64 cir_u;
	u64 cir_s;
	u64 cbs_s;
};

struct qm_dma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

struct hisi_qm_status {
	u32 eq_head;
	bool eqc_phase;
	u32 aeq_head;
	bool aeqc_phase;
	atomic_t flags;
	int stop_reason;
};

struct hisi_qm;

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

struct hisi_qm_err_info {
	char *acpi_rst;
	u32 msi_wr_port;
	u32 ecc_2bits_mask;
	u32 qm_shutdown_mask;
	u32 dev_shutdown_mask;
	u32 qm_reset_mask;
	u32 dev_reset_mask;
	u32 ce;
	u32 nfe;
	u32 fe;
};

struct hisi_qm_err_status {
	u32 is_qm_ecc_mbit;
	u32 is_dev_ecc_mbit;
};

struct hisi_qm_err_ini {
	int (*hw_init)(struct hisi_qm *qm);
	void (*hw_err_enable)(struct hisi_qm *qm);
	void (*hw_err_disable)(struct hisi_qm *qm);
	u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
	void (*open_axi_master_ooo)(struct hisi_qm *qm);
	void (*close_axi_master_ooo)(struct hisi_qm *qm);
	void (*open_sva_prefetch)(struct hisi_qm *qm);
	void (*close_sva_prefetch)(struct hisi_qm *qm);
	void (*show_last_dfx_regs)(struct hisi_qm *qm);
	void (*err_info_init)(struct hisi_qm *qm);
	enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
};

struct hisi_qm_cap_info {
	u32 type;
	/* Register offset */
	u32 offset;
	/* Bit offset in register */
	u32 shift;
	u32 mask;
	u32 v1_val;
	u32 v2_val;
	u32 v3_val;
};

struct hisi_qm_cap_record {
	u32 type;
	u32 cap_val;
};

struct hisi_qm_cap_tables {
	struct hisi_qm_cap_record *qm_cap_table;
	struct hisi_qm_cap_record *dev_cap_table;
};

struct hisi_qm_list {
	struct mutex lock;
	struct list_head list;
	int (*register_to_crypto)(struct hisi_qm *qm);
	void (*unregister_from_crypto)(struct hisi_qm *qm);
};
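
/*
 * Each accelerator driver keeps a single hisi_qm_list for all of its
 * devices. hisi_qm_alg_register()/hisi_qm_alg_unregister() add and
 * remove a qm from the list and invoke the register_to_crypto /
 * unregister_from_crypto hooks when crypto algorithms need to be
 * (un)registered, and hisi_qm_alloc_qps_node() hands out free queue
 * pairs from the devices on the list, preferring one close to the
 * requested NUMA node.
 */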

struct hisi_qm_poll_data {
	struct hisi_qm *qm;
	struct work_struct work;
	u16 *qp_finish_id;
	u16 eqe_num;
};

/**
 * struct qm_err_isolate
 * @isolate_lock: protects device error log
 * @err_threshold: user config error threshold which triggers isolation
 * @is_isolate: device isolation state
 * @qm_hw_errs: list of recorded device hardware errors
 */
struct qm_err_isolate {
	struct mutex isolate_lock;
	u32 err_threshold;
	bool is_isolate;
	struct list_head qm_hw_errs;
};

struct hisi_qm {
	enum qm_hw_ver ver;
	enum qm_fun_type fun_type;
	const char *dev_name;
	struct pci_dev *pdev;
	void __iomem *io_base;
	void __iomem *db_io_base;

	/* Capability version, 0: not supported */
	u32 cap_ver;
	u32 sqe_size;
	u32 qp_base;
	u32 qp_num;
	u32 qp_in_used;
	u32 ctrl_qp_num;
	u32 max_qp_num;
	u32 vfs_num;
	u32 db_interval;
	u16 eq_depth;
	u16 aeq_depth;
	struct list_head list;
	struct hisi_qm_list *qm_list;

	struct qm_dma qdma;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqe *eqe;
	struct qm_aeqe *aeqe;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqe_dma;
	dma_addr_t aeqe_dma;

	struct hisi_qm_status status;
	const struct hisi_qm_err_ini *err_ini;
	struct hisi_qm_err_info err_info;
	struct hisi_qm_err_status err_status;
	/* driver removing and reset sched */
	unsigned long misc_ctl;
	/* Device capability bit */
	unsigned long caps;

	struct rw_semaphore qps_lock;
	struct idr qp_idr;
	struct hisi_qp *qp_array;
	struct hisi_qm_poll_data *poll_data;

	struct mutex mailbox_lock;

	const struct hisi_qm_hw_ops *ops;

	struct qm_debug debug;

	u32 error_mask;

	struct workqueue_struct *wq;
	struct work_struct rst_work;
	struct work_struct cmd_process;

	bool use_sva;

	resource_size_t phys_base;
	resource_size_t db_phys_base;
	struct uacce_device *uacce;
	int mode;
	struct qm_shaper_factor *factor;
	u32 mb_qos;
	u32 type_rate;
	struct qm_err_isolate isolate_data;

	struct hisi_qm_cap_tables cap_tables;
};

struct hisi_qp_status {
	atomic_t used;
	u16 sq_tail;
	u16 cq_head;
	bool cqc_phase;
	atomic_t flags;
};

struct hisi_qp_ops {
	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

struct hisi_qp {
	u32 qp_id;
	u16 sq_depth;
	u16 cq_depth;
	u8 alg_type;
	u8 req_type;

	struct qm_dma qdma;
	void *sqe;
	struct qm_cqe *cqe;
	dma_addr_t sqe_dma;
	dma_addr_t cqe_dma;

	struct hisi_qp_status qp_status;
	struct hisi_qp_ops *hw_ops;
	void *qp_ctx;
	void (*req_cb)(struct hisi_qp *qp, void *data);
	void (*event_cb)(struct hisi_qp *qp);

	struct hisi_qm *qm;
	bool is_resetting;
	bool is_in_kernel;
	u16 pasid;
	struct uacce_queue *uacce_q;
};
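
/*
 * Informal note: for in-kernel users, req_cb is invoked by the QM
 * completion path once per finished request, with @data pointing at the
 * SQE that completed; event_cb is used on the uacce path to wake up the
 * user-space queue instead.
 */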

static inline int q_num_set(const char *val, const struct kernel_param *kp,
			    unsigned int device)
{
	struct pci_dev *pdev;
	u32 n, q_num;
	int ret;

	if (!val)
		return -EINVAL;

	pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
	if (!pdev) {
		q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
		pr_info("No device found currently, suppose queue number is %u\n",
			q_num);
	} else {
		if (pdev->revision == QM_HW_V1)
			q_num = QM_QNUM_V1;
		else
			q_num = QM_QNUM_V2;

		pci_dev_put(pdev);
	}

	ret = kstrtou32(val, 10, &n);
	if (ret || n < QM_MIN_QNUM || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}
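
/*
 * Typical wiring of q_num_set() into a module parameter (a sketch; the
 * wrapper name and EXAMPLE_PF_DEVICE_ID are placeholders for whatever
 * PCI device ID the driver actually probes):
 *
 *	static int example_pf_q_num_set(const char *val,
 *					const struct kernel_param *kp)
 *	{
 *		return q_num_set(val, kp, EXAMPLE_PF_DEVICE_ID);
 *	}
 *
 *	static const struct kernel_param_ops example_pf_q_num_ops = {
 *		.set = example_pf_q_num_set,
 *		.get = param_get_int,
 *	};
 *
 *	static u32 pf_q_num;
 *	module_param_cb(pf_q_num, &example_pf_q_num_ops, &pf_q_num, 0444);
 */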

static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret < 0)
		return ret;

	if (n > QM_MAX_VFS_NUM_V2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int mode_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || (n != UACCE_MODE_SVA &&
			 n != UACCE_MODE_NOUACCE))
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
	return mode_set(val, kp);
}
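
/*
 * Drivers usually expose this as a module parameter, along the lines of
 * (sketch only):
 *
 *	static const struct kernel_param_ops uacce_mode_ops = {
 *		.set = uacce_mode_set,
 *		.get = param_get_int,
 *	};
 *
 *	static int uacce_mode = UACCE_MODE_NOUACCE;
 *	module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
 *	MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
 */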

static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
	INIT_LIST_HEAD(&qm_list->list);
	mutex_init(&qm_list->lock);
}

int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
			  struct dfx_diff_registers *dregs, u32 reg_len);
void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
				struct dfx_diff_registers *dregs, u32 regs_len);

pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);

int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op);

struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
	struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
	u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
			    struct hisi_acc_sgl_pool *pool);
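
/*
 * Minimal SGL pool usage sketch (error handling omitted; the pool size
 * is an arbitrary example):
 *
 *	struct hisi_acc_sgl_pool *pool;
 *	struct hisi_acc_hw_sgl *hw_sgl;
 *	dma_addr_t hw_sgl_dma;
 *
 *	pool = hisi_acc_create_sgl_pool(dev, 16, HISI_ACC_SGL_SGE_NR_MAX);
 *	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, pool, 0, &hw_sgl_dma);
 *	(the device then uses hw_sgl_dma as the data address in an SQE)
 *	hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl);
 *	hisi_acc_free_sgl_pool(dev, pool);
 */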
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
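
/*
 * Rough life cycle for an in-kernel user of a queue pair (a sketch built
 * from the prototypes above, not a complete driver; the callback and
 * context names are hypothetical):
 *
 *	struct hisi_qp *qp;
 *
 *	hisi_qm_alloc_qps_node(qm_list, 1, alg_type, numa_node, &qp);
 *	qp->req_cb = my_request_complete;	(hypothetical callback)
 *	qp->qp_ctx = my_ctx;			(hypothetical context)
 *	hisi_qm_start_qp(qp, 0);
 *
 *	hisi_qp_send(qp, sqe);			(enqueue work)
 *
 *	hisi_qm_stop_qp(qp);
 *	hisi_qm_free_qps(&qp, 1);
 */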
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read);
int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
		     u32 dev_algs_size);

/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
struct pci_driver *hisi_hpre_get_pf_driver(void);
struct pci_driver *hisi_zip_get_pf_driver(void);
#endif
569