/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_DEV_H
#define __NITROX_DEV_H

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/if.h>

#define VERSION_LEN 32
/* Maximum queues in PF mode */
#define MAX_PF_QUEUES 64
/* Maximum device queues */
#define MAX_DEV_QUEUES (MAX_PF_QUEUES)
/* Maximum UCD Blocks */
#define CNN55XX_MAX_UCD_BLOCKS 8
/**
 * struct nitrox_cmdq - NITROX command queue
 * @cmd_qlock: command queue lock
 * @resp_qlock: response queue lock
 * @backlog_qlock: backlog queue lock
 * @ndev: NITROX device
 * @response_head: submitted request list
 * @backlog_head: backlog queue
 * @dbell_csr_addr: doorbell register address for this queue
 * @compl_cnt_csr_addr: completion count register address of the SLC port
 * @base: command queue base address
 * @dma: DMA address of the base
 * @backlog_qflush: work to flush backlogged commands
 * @pending_count: requests pending at the device
 * @backlog_count: backlog request count
 * @write_idx: next write index for the command
 * @instr_size: command size
 * @qno: command queue number
 * @qsize: command queue size
 * @unalign_base: unaligned base address
 * @unalign_dma: unaligned DMA address
 */
struct nitrox_cmdq {
	spinlock_t cmd_qlock;
	spinlock_t resp_qlock;
	spinlock_t backlog_qlock;

	struct nitrox_device *ndev;
	struct list_head response_head;
	struct list_head backlog_head;

	u8 __iomem *dbell_csr_addr;
	u8 __iomem *compl_cnt_csr_addr;
	u8 *base;
	dma_addr_t dma;

	struct work_struct backlog_qflush;

	atomic_t pending_count;
	atomic_t backlog_count;

	int write_idx;
	u8 instr_size;
	u8 qno;
	u32 qsize;

	u8 *unalign_base;
	dma_addr_t unalign_dma;
};
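
/*
 * Illustrative sketch (not part of this header, and not the driver's actual
 * submit path): how a command might be posted to a struct nitrox_cmdq based
 * on the fields above. The helper name, the "qlen" parameter (the entry
 * count is not stored in the struct) and the wrap-around handling are
 * assumptions.
 *
 *	static void example_post_cmd(struct nitrox_cmdq *cmdq, const u8 *instr,
 *				     u32 qlen)
 *	{
 *		spin_lock_bh(&cmdq->cmd_qlock);
 *		// copy one instruction into the ring at the current write index
 *		memcpy(cmdq->base + ((u64)cmdq->write_idx * cmdq->instr_size),
 *		       instr, cmdq->instr_size);
 *		cmdq->write_idx = (cmdq->write_idx + 1) % qlen;
 *		atomic_inc(&cmdq->pending_count);
 *		// a single doorbell write tells the device one more command is queued
 *		writeq(1, cmdq->dbell_csr_addr);
 *		spin_unlock_bh(&cmdq->cmd_qlock);
 *	}
 */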

/**
 * struct nitrox_hw - NITROX hardware information
 * @partname: part name, e.g. CNN55xxx-xxx
 * @fw_name: firmware version strings, one per UCD block
 * @freq: NITROX frequency
 * @vendor_id: vendor ID
 * @device_id: device ID
 * @revision_id: revision ID
 * @se_cores: number of symmetric cores
 * @ae_cores: number of asymmetric cores
 * @zip_cores: number of zip cores
 */
struct nitrox_hw {
	char partname[IFNAMSIZ * 2];
	char fw_name[CNN55XX_MAX_UCD_BLOCKS][VERSION_LEN];

	int freq;
	u16 vendor_id;
	u16 device_id;
	u8 revision_id;

	u8 se_cores;
	u8 ae_cores;
	u8 zip_cores;
};

struct nitrox_stats {
	atomic64_t posted;
	atomic64_t completed;
	atomic64_t dropped;
};

#define IRQ_NAMESZ 32

struct nitrox_q_vector {
	char name[IRQ_NAMESZ];
	bool valid;
	int ring;
	struct tasklet_struct resp_tasklet;
	union {
		struct nitrox_cmdq *cmdq;
		struct nitrox_device *ndev;
	};
};

enum mcode_type {
	MCODE_TYPE_INVALID,
	MCODE_TYPE_AE,
	MCODE_TYPE_SE_SSL,
	MCODE_TYPE_SE_IPSEC,
};

/**
 * union mbox_msg - Mailbox message data
 * @value: raw 64-bit message value
 * @type: message type
 * @opcode: message opcode
 * @data: message data
 * @id: identification message (chip ID and VF number)
 * @mcode_info: microcode information message
 */
union mbox_msg {
	u64 value;
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 data: 58;
	};
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 chipid: 8;
		u64 vfid: 8;
	} id;
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 count: 4;
		u64 info: 40;
		u64 next_se_grp: 3;
		u64 next_ae_grp: 3;
	} mcode_info;
};
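
/*
 * Illustrative sketch (not part of this header): composing an identification
 * message through the "id" view of union mbox_msg. Only the field layout
 * comes from the union above; the type/opcode values used here are
 * hypothetical.
 *
 *	union mbox_msg msg;
 *
 *	msg.value = 0;
 *	msg.id.type = 1;		// hypothetical "response" message type
 *	msg.id.opcode = 2;		// hypothetical "identify" opcode
 *	msg.id.chipid = ndev->idx;
 *	msg.id.vfid = vfdev->vfno;
 *	// the raw 64-bit msg.value is what gets written to the mailbox register
 */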

/**
 * struct nitrox_vfdev - NITROX VF device instance in PF
 * @state: VF device state
 * @vfno: VF number
 * @nr_queues: number of queues enabled in VF
 * @ring: ring to communicate with VF
 * @msg: Mailbox message data from VF
 * @mbx_resp: Mailbox counters
 */
struct nitrox_vfdev {
	atomic_t state;
	int vfno;
	int nr_queues;
	int ring;
	union mbox_msg msg;
	atomic64_t mbx_resp;
};

/**
 * struct nitrox_iov - SR-IOV information
 * @num_vfs: number of VF(s) enabled
 * @max_vf_queues: maximum number of queues allowed per VF
 * @vfdev: VF(s) devices
 * @pf2vf_wq: workqueue for PF2VF communication
 * @msix: MSI-X entry for PF in SR-IOV case
 */
struct nitrox_iov {
	int num_vfs;
	int max_vf_queues;
	struct nitrox_vfdev *vfdev;
	struct workqueue_struct *pf2vf_wq;
	struct msix_entry msix;
};

/*
 * NITROX Device states
 */
enum ndev_state {
	__NDEV_NOT_READY,
	__NDEV_READY,
	__NDEV_IN_RESET,
};

/* NITROX support modes for VF(s) */
enum vf_mode {
	__NDEV_MODE_PF,
	__NDEV_MODE_VF16,
	__NDEV_MODE_VF32,
	__NDEV_MODE_VF64,
	__NDEV_MODE_VF128,
};
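
/*
 * Illustrative sketch (assumption, not defined in this header): the VF modes
 * above partition the device's rings among the enabled VFs, so driver code
 * might map a mode to a per-VF queue count roughly like this. The function
 * name and the exact counts are assumptions about the ring partitioning.
 *
 *	static int example_vf_mode_to_nr_queues(enum vf_mode mode)
 *	{
 *		switch (mode) {
 *		case __NDEV_MODE_PF:
 *			return MAX_PF_QUEUES;
 *		case __NDEV_MODE_VF16:
 *			return 8;	// 16 VFs sharing the rings
 *		case __NDEV_MODE_VF32:
 *			return 4;
 *		case __NDEV_MODE_VF64:
 *			return 2;
 *		case __NDEV_MODE_VF128:
 *			return 1;
 *		}
 *		return 0;
 *	}
 */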

#define __NDEV_SRIOV_BIT 0

/* command queue size */
#define DEFAULT_CMD_QLEN 2048
/* command timeout in milliseconds */
#define CMD_TIMEOUT 2000

#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))

#define NITROX_CSR_ADDR(ndev, offset) \
	((ndev)->bar_addr + (offset))

/**
 * struct nitrox_device - NITROX Device Information.
 * @list: pointer to linked list of devices
 * @bar_addr: iomap address
 * @pdev: PCI device information
 * @state: NITROX device state
 * @flags: device feature flags
 * @timeout: Request timeout in jiffies
 * @refcnt: Device usage count
 * @idx: device index (0..N)
 * @node: NUMA node id attached
 * @qlen: Command queue length
 * @nr_queues: Number of command queues
 * @mode: Device mode (PF/VF)
 * @ctx_pool: DMA pool for crypto context
 * @pkt_inq: Packet input rings
 * @aqmq: AQM command queues
 * @qvec: MSI-X queue vectors information
 * @iov: SR-IOV information
 * @num_vecs: number of MSI-X vectors
 * @stats: request statistics
 * @hw: hardware information
 * @debugfs_dir: debugfs directory
 */
struct nitrox_device {
	struct list_head list;

	u8 __iomem *bar_addr;
	struct pci_dev *pdev;

	atomic_t state;
	unsigned long flags;
	unsigned long timeout;
	refcount_t refcnt;

	u8 idx;
	int node;
	u16 qlen;
	u16 nr_queues;
	enum vf_mode mode;

	struct dma_pool *ctx_pool;
	struct nitrox_cmdq *pkt_inq;
	struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp;

	struct nitrox_q_vector *qvec;
	struct nitrox_iov iov;
	int num_vecs;

	struct nitrox_stats stats;
	struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *debugfs_dir;
#endif
};

/**
 * nitrox_read_csr - Read from device register
 * @ndev: NITROX device
 * @offset: offset of the register to read
 *
 * Returns: value read
 */
static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset)
{
	return readq(ndev->bar_addr + offset);
}

/**
 * nitrox_write_csr - Write to device register
 * @ndev: NITROX device
 * @offset: offset of the register to write
 * @value: value to write
 */
static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
				    u64 value)
{
	writeq(value, (ndev->bar_addr + offset));
}
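
/*
 * Illustrative usage of the CSR accessors (the register name
 * EXAMPLE_CSR_OFFSET is hypothetical; real offsets live in nitrox_csr.h):
 *
 *	u64 val;
 *
 *	val = nitrox_read_csr(ndev, EXAMPLE_CSR_OFFSET);
 *	val |= BIT_ULL(0);			// e.g. set an enable bit
 *	nitrox_write_csr(ndev, EXAMPLE_CSR_OFFSET, val);
 */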

static inline bool nitrox_ready(struct nitrox_device *ndev)
{
	return atomic_read(&ndev->state) == __NDEV_READY;
}

static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev)
{
	return atomic_read(&vfdev->state) == __NDEV_READY;
}
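
/*
 * Illustrative sketch (assumption): request paths would typically gate
 * submissions on device readiness, e.g.
 *
 *	if (!nitrox_ready(ndev))
 *		return -ENODEV;
 */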

#endif /* __NITROX_DEV_H */