/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */

#ifndef __OTX2_CPT_COMMON_H
#define __OTX2_CPT_COMMON_H

#include <linux/pci.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <net/devlink.h>
#include "otx2_cpt_hw_types.h"
#include "rvu.h"
#include "mbox.h"

#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
		(((blk) << 20) | ((slot) << 12) | (offs))
#define OTX2_CPT_RVU_PFFUNC(pf, func)	\
		((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
		(((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
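/*
 * Example (values illustrative): OTX2_CPT_RVU_FUNC_ADDR_S() builds the
 * BAR offset of a per-slot CPT register from a block number, an LF slot
 * and a register offset:
 *
 *	OTX2_CPT_RVU_FUNC_ADDR_S(1, 0, 0x10) == (1 << 20) | (0 << 12) | 0x10
 *
 * OTX2_CPT_RVU_PFFUNC() packs a PF number and a function number into the
 * RVU pcifunc layout using the PF/FUNC shifts and masks from rvu.h.
 */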

#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
#define OTX2_CPT_DMA_MINALIGN 128

/* HW capability flags */
#define CN10K_MBOX 0
#define CN10K_LMTST 1

#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES

enum otx2_cpt_eng_type {
	OTX2_CPT_AE_TYPES = 1,
	OTX2_CPT_SE_TYPES = 2,
	OTX2_CPT_IE_TYPES = 3,
	OTX2_CPT_MAX_ENG_TYPES,
};

/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
#define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE
#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
#define MBOX_MSG_GET_CAPS 0xBFD
#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
/*
 * Message request to configure a CPT LF for inline inbound IPsec.
 * This message is only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_rx_inline_lf_cfg {
	struct mbox_msghdr hdr;
	u16 sso_pf_func;
	u16 param1;
	u16 param2;
	u16 opcode;
	u32 credit;
	u32 reserved;
};
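/*
 * Minimal usage sketch, assuming a VF with a valid otx2_mbox whose PF
 * handles MBOX_MSG_RX_INLINE_IPSEC_LF_CFG; field values are illustrative:
 *
 *	struct otx2_cpt_rx_inline_lf_cfg *req;
 *
 *	req = (struct otx2_cpt_rx_inline_lf_cfg *)
 *	      otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
 *				      sizeof(struct msg_rsp));
 *	if (!req)
 *		return -ENOMEM;
 *	req->hdr.id = MBOX_MSG_RX_INLINE_IPSEC_LF_CFG;
 *	req->hdr.sig = OTX2_MBOX_REQ_SIG;
 *	req->sso_pf_func = sso_pf_func;
 *	return otx2_cpt_send_mbox_msg(mbox, pdev);
 */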

/*
 * Message request and response to get the engine group number to which a
 * given type of engine (SE, AE, IE) is attached.
 * These messages are only used between CPT PF <=> CPT VF.
 */
struct otx2_cpt_egrp_num_msg {
	struct mbox_msghdr hdr;
	u8 eng_type;
};

struct otx2_cpt_egrp_num_rsp {
	struct mbox_msghdr hdr;
	u8 eng_type;
	u8 eng_grp_num;
};
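/*
 * Typical request/response flow (illustrative sketch): the VF asks for the
 * group number of one engine type and the PF replies with the group that
 * has that engine type attached:
 *
 *	req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
 *	req->eng_type = OTX2_CPT_SE_TYPES;
 *	...
 *	eng_grp_num = rsp->eng_grp_num;
 */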

/*
 * Message request and response to get kernel crypto limits.
 * These messages are only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_kvf_limits_msg {
	struct mbox_msghdr hdr;
};

struct otx2_cpt_kvf_limits_rsp {
	struct mbox_msghdr hdr;
	u8 kvf_limits;
};

/* CPT HW capabilities */
union otx2_cpt_eng_caps {
	u64 u;
	struct {
		u64 reserved_0_4:5;
		u64 mul:1;
		u64 sha1_sha2:1;
		u64 chacha20:1;
		u64 zuc_snow3g:1;
		u64 sha3:1;
		u64 aes:1;
		u64 kasumi:1;
		u64 des:1;
		u64 crc:1;
		u64 reserved_14_63:50;
	};
};
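/*
 * Each bit advertises one hardware feature of an engine type; a capability
 * can be tested directly on the bitfield, e.g. (register_chacha20_algs()
 * is a hypothetical helper):
 *
 *	union otx2_cpt_eng_caps caps = rsp->eng_caps[OTX2_CPT_SE_TYPES];
 *
 *	if (caps.chacha20)
 *		register_chacha20_algs();
 */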

/*
 * Message request and response to get HW capabilities for each
 * engine type (SE, IE, AE).
 * These messages are only used between CPT PF <=> CPT VF.
 */
struct otx2_cpt_caps_msg {
	struct mbox_msghdr hdr;
};

struct otx2_cpt_caps_rsp {
	struct mbox_msghdr hdr;
	u16 cpt_pf_drv_version;
	u8 cpt_revision;
	union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
};

static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
				    u64 offs, u64 val)
{
	writeq_relaxed(val, reg_base +
		       OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}

static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
				  u64 offs)
{
	return readq_relaxed(reg_base +
			     OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}
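/*
 * Usage sketch, assuming a mapped reg_base and a CPT block address
 * (e.g. BLKADDR_CPT0); the register offset is illustrative:
 *
 *	otx2_cpt_write64(reg_base, BLKADDR_CPT0, slot, OTX2_CPT_LF_CTL, 0x1);
 *	val = otx2_cpt_read64(reg_base, BLKADDR_CPT0, slot, OTX2_CPT_LF_CTL);
 */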

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
	    pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
		return true;

	return false;
}

static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
					unsigned long *cap_flag)
{
	if (!is_dev_otx2(pdev)) {
		__set_bit(CN10K_MBOX, cap_flag);
		__set_bit(CN10K_LMTST, cap_flag);
	}
}
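/*
 * Usage sketch: a caller typically keeps the capability bits in an
 * unsigned long bitmap and tests them with test_bit(); cap_flag and
 * use_lmtst() below are illustrative:
 *
 *	otx2_cpt_set_hw_caps(pdev, &cap_flag);
 *	if (test_bit(CN10K_LMTST, &cap_flag))
 *		use_lmtst();
 */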

int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);

int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
				  struct pci_dev *pdev);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			      u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			 u64 reg, u64 *val, int blkaddr);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			  u64 reg, u64 val, int blkaddr);
struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);

#endif /* __OTX2_CPT_COMMON_H */