// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"

#define HPRE_QM_ABNML_INT_MASK		0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_COMM_CNT_CLR_CE		0x0
#define HPRE_CTRL_CNT_CLR_CE		0x301000
#define HPRE_FSM_MAX_CNT		0x301008
#define HPRE_VFG_AXQOS			0x30100c
#define HPRE_VFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_AWUSR_FP_CFG		0x301018
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_RAS_WIDTH_CFG		0x301028
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_RSA_ENB			BIT(0)
#define HPRE_ECC_ENB			BIT(1)
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_INT_MASK			0x301400
#define HPRE_INT_STATUS			0x301800
#define HPRE_HAC_INT_MSK		0x301400
#define HPRE_HAC_RAS_CE_ENB		0x301410
#define HPRE_HAC_RAS_NFE_ENB		0x301414
#define HPRE_HAC_RAS_FE_ENB		0x301418
#define HPRE_HAC_INT_SET		0x301500
#define HPRE_RNG_TIMEOUT_NUM		0x301A34
#define HPRE_CORE_INT_ENABLE		0
#define HPRE_CORE_INT_DISABLE		GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_CLSTR_BASE			0x302000
#define HPRE_CORE_EN_OFFSET		0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_OOO_SHUTDOWN_SEL		0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE		0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS	(HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT		0x301a04
#define HPRE_HAC_ECC2_CNT		0x301a08
#define HPRE_HAC_SOURCE_INT		0x301600
#define HPRE_CLSTR_ADDR_INTRVL		0x1000
#define HPRE_CLUSTER_INQURY		0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_TIMEOUT_ABNML_BIT		6
#define HPRE_PASID_EN_BIT		9
#define HPRE_REG_RD_INTVRL_US		10
#define HPRE_REG_RD_TMOUT_US		1000
#define HPRE_DBGFS_VAL_MAX_LEN		20
#define PCI_DEVICE_ID_HUAWEI_HPRE_PF	0xa258
#define HPRE_QM_USR_CFG_MASK		GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK		GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK		GENMASK(7, 0)
#define HPRE_BD_USR_MASK		GENMASK(1, 0)
#define HPRE_PREFETCH_CFG		0x301130
#define HPRE_SVA_PREFTCH_DFX		0x30115C
#define HPRE_PREFETCH_ENABLE		(~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE		BIT(30)
#define HPRE_SVA_DISABLE_READY		(BIT(4) | BIT(8))

/* clock gate */
#define HPRE_CLKGATE_CTL		0x301a10
#define HPRE_PEH_CFG_AUTO_GATE		0x301a2c
#define HPRE_CLUSTER_DYN_CTL		0x302010
#define HPRE_CORE_SHB_CFG		0x302088
#define HPRE_CLKGATE_CTL_EN		BIT(0)
#define HPRE_PEH_CFG_AUTO_GATE_EN	BIT(0)
#define HPRE_CLUSTER_DYN_CTL_EN		BIT(0)
#define HPRE_CORE_GATE_EN		(BIT(30) | BIT(31))

#define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
#define HPRE_WR_MSI_PORT		BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR		BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR		BIT(5)

#define HPRE_QM_BME_FLR			BIT(7)
#define HPRE_QM_PM_FLR			BIT(11)
#define HPRE_QM_SRIOV_FLR		BIT(12)

#define HPRE_SHAPER_TYPE_RATE		640
#define HPRE_VIA_MSI_DSM		1
#define HPRE_SQE_MASK_OFFSET		8
#define HPRE_SQE_MASK_LEN		24

#define HPRE_DFX_BASE			0x301000
#define HPRE_DFX_COMMON1		0x301400
#define HPRE_DFX_COMMON2		0x301A00
#define HPRE_DFX_CORE			0x302000
#define HPRE_DFX_BASE_LEN		0x55
#define HPRE_DFX_COMMON1_LEN		0x41
#define HPRE_DFX_COMMON2_LEN		0xE
#define HPRE_DFX_CORE_LEN		0x43

static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);

struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

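/*
 * Note: the trailing "\n" in each algorithm name below is intentional; the
 * QM layer (see hisi_qm_set_algs()) is expected to concatenate the entries
 * whose alg_msk bits are set into the newline-separated algorithm list
 * exposed through UACCE. This note is an editorial aid, not part of the
 * original source.
 */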
static const struct qm_dev_alg hpre_dev_algs[] = {
	{
		.alg_msk = BIT(0),
		.alg = "rsa\n"
	}, {
		.alg_msk = BIT(1),
		.alg = "dh\n"
	}, {
		.alg_msk = BIT(2),
		.alg = "ecdh\n"
	}, {
		.alg_msk = BIT(3),
		.alg = "ecdsa\n"
	}, {
		.alg_msk = BIT(4),
		.alg = "sm2\n"
	}, {
		.alg_msk = BIT(5),
		.alg = "x25519\n"
	}, {
		.alg_msk = BIT(6),
		.alg = "x448\n"
	}, {
		/* sentinel */
	}
};

static struct hisi_qm_list hpre_devices = {
	.register_to_crypto	= hpre_algs_register,
	.unregister_from_crypto	= hpre_algs_unregister,
};

static const char * const hpre_debug_file_name[] = {
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

enum hpre_cap_type {
	HPRE_QM_NFE_MASK_CAP,
	HPRE_QM_RESET_MASK_CAP,
	HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
	HPRE_QM_CE_MASK_CAP,
	HPRE_NFE_MASK_CAP,
	HPRE_RESET_MASK_CAP,
	HPRE_OOO_SHUTDOWN_MASK_CAP,
	HPRE_CE_MASK_CAP,
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_TYPE_NUM_CAP,
	HPRE_CORE_NUM_CAP,
	HPRE_CLUSTER_CORE_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
	HPRE_CORE1_ALG_BITMAP_CAP,
	HPRE_CORE2_ALG_BITMAP_CAP,
	HPRE_CORE3_ALG_BITMAP_CAP,
	HPRE_CORE4_ALG_BITMAP_CAP,
	HPRE_CORE5_ALG_BITMAP_CAP,
	HPRE_CORE6_ALG_BITMAP_CAP,
	HPRE_CORE7_ALG_BITMAP_CAP,
	HPRE_CORE8_ALG_BITMAP_CAP,
	HPRE_CORE9_ALG_BITMAP_CAP,
	HPRE_CORE10_ALG_BITMAP_CAP
};

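/*
 * Each entry below follows the hisi_qm_cap_info layout: the capability
 * type, the offset of the capability register, the shift and mask used to
 * extract the field, and then per-generation default values the QM layer
 * falls back to when the capability registers cannot be read. A simplified
 * sketch of the register path (names from the QM layer):
 *
 *	val = readl(qm->io_base + info->offset);
 *	return (val >> info->shift) & info->mask;
 *
 * See hisi_qm_get_hw_info() for the authoritative lookup rule.
 */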
static const struct hisi_qm_cap_info hpre_basic_info[] = {
	{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
	{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
	{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
	{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
	{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
	{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
	{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
	{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
	{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
	{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
	{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
	{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
	{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};

enum hpre_pre_store_cap_idx {
	HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
	HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
	HPRE_DRV_ALG_BITMAP_CAP_IDX,
	HPRE_DEV_ALG_BITMAP_CAP_IDX,
};

static const u32 hpre_pre_store_caps[] = {
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
};
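/*
 * The capabilities listed above are read once at probe time (see
 * hpre_pre_store_cap_reg()) and cached in qm->cap_tables.dev_cap_table, so
 * hot paths such as hpre_check_alg_support() can consult them without
 * touching the hardware again.
 */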

static const struct hpre_hw_error hpre_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "core_ecc_1bit_err_int_set"
	}, {
		.int_msk = BIT(1),
		.msg = "core_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(2),
		.msg = "dat_wb_poison_int_set"
	}, {
		.int_msk = BIT(3),
		.msg = "dat_rd_poison_int_set"
	}, {
		.int_msk = BIT(4),
		.msg = "bd_rd_poison_int_set"
	}, {
		.int_msk = BIT(5),
		.msg = "ooo_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(6),
		.msg = "cluster1_shb_timeout_int_set"
	}, {
		.int_msk = BIT(7),
		.msg = "cluster2_shb_timeout_int_set"
	}, {
		.int_msk = BIT(8),
		.msg = "cluster3_shb_timeout_int_set"
	}, {
		.int_msk = BIT(9),
		.msg = "cluster4_shb_timeout_int_set"
	}, {
		.int_msk = GENMASK(15, 10),
		.msg = "ooo_rdrsp_err_int_set"
	}, {
		.int_msk = GENMASK(21, 16),
		.msg = "ooo_wrrsp_err_int_set"
	}, {
		.int_msk = BIT(22),
		.msg = "pt_rng_timeout_int_set"
	}, {
		.int_msk = BIT(23),
		.msg = "sva_fsm_timeout_int_set"
	}, {
		.int_msk = BIT(24),
		.msg = "sva_int_set"
	}, {
		/* sentinel */
	}
};

static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};

static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS ", HPRE_VFG_AXQOS},
	{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
	{"BD_ENDIAN ", HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS ", HPRE_POISON_BYPASS},
	{"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
	{"INT_STATUS ", HPRE_INT_STATUS},
	{"INT_MASK ", HPRE_HAC_INT_MSK},
	{"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB},
	{"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB},
	{"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB},
	{"INT_SET ", HPRE_HAC_INT_SET},
	{"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM},
};

static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt"
};
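/*
 * The counters above are exposed through debugfs by hpre_dfx_debug_init(),
 * typically under a path like the following (the exact layout may vary
 * with the QM debugfs code):
 *
 *	/sys/kernel/debug/hisi_hpre/<pci-bdf>/hpre_dfx/send_cnt
 */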

/* HPRE DFX register regions and their lengths */
static struct dfx_diff_registers hpre_diff_regs[] = {
	{
		.reg_offset = HPRE_DFX_BASE,
		.reg_len = HPRE_DFX_BASE_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON1,
		.reg_len = HPRE_DFX_COMMON1_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON2,
		.reg_len = HPRE_DFX_COMMON2_LEN,
	}, {
		.reg_offset = HPRE_DFX_CORE,
		.reg_len = HPRE_DFX_CORE_LEN,
	},
};

bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
	if (alg & cap_val)
		return true;

	return false;
}
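/*
 * Illustrative caller (the real gating lives in hpre_crypto.c): register an
 * algorithm family only when its bit is set in the driver bitmap, e.g.
 *
 *	if (hpre_check_alg_support(qm, HPRE_DRV_ALG_BITMAP_ECC))
 *		...register the ECC algorithms...
 *
 * HPRE_DRV_ALG_BITMAP_ECC stands in for whatever mask the crypto layer
 * defines; it is not declared in this file.
 */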

static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hpre_diff_regs));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs);

static int hpre_com_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);

static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);

static const struct kernel_param_ops hpre_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0: hpre registers to crypto only.
 * uacce_mode = 1: hpre registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
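/*
 * For example (illustrative), to expose HPRE through UACCE as well:
 *
 *	modprobe hisi_hpre uacce_mode=1
 */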

static bool pf_q_num_flag;
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
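/*
 * For example (illustrative), VFs can be enabled at load time with
 * "modprobe hisi_hpre vfs_num=4", or later through the standard sysfs
 * interface wired to hisi_qm_sriov_configure() below:
 *
 *	echo 4 > /sys/bus/pci/devices/<pci-bdf>/sriov_numvfs
 */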

struct hisi_qp *hpre_create_qp(u8 type)
{
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
		return NULL;

	/*
	 * type 0 - RSA/DH algorithms supported in V2,
	 * type 1 - ECC algorithms supported in V3.
	 */
	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}
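/*
 * Illustrative use, assuming the QM queue helpers hisi_qm_start_qp() and
 * hisi_qm_release_qp() exported by the QM layer:
 *
 *	struct hisi_qp *qp = hpre_create_qp(HPRE_V2_ALG_TYPE);
 *
 *	if (qp && hisi_qm_start_qp(qp, 0) < 0)
 *		hisi_qm_release_qp(qp);
 *
 * The real consumer is hpre_crypto.c, which starts the qp and attaches its
 * request callbacks.
 */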

static void hpre_config_pasid(struct hisi_qm *qm)
{
	u32 val1, val2;

	if (qm->ver >= QM_HW_V3)
		return;

	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	if (qm->use_sva) {
		val1 |= BIT(HPRE_PASID_EN_BIT);
		val2 |= BIT(HPRE_PASID_EN_BIT);
	} else {
		val1 &= ~BIT(HPRE_PASID_EN_BIT);
		val2 &= ~BIT(HPRE_PASID_EN_BIT);
	}
	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}

static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Hpre GUID failed\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}

static int hpre_set_cluster(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	u32 cluster_core_mask;
	u8 clusters_num;
	u32 val = 0;
	int ret, i;

	cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* clusters initiating */
		writel(cluster_core_mask,
		       qm->io_base + offset + HPRE_CORE_ENB);
		writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
		ret = readl_relaxed_poll_timeout(qm->io_base + offset +
					HPRE_CORE_INI_STATUS, val,
					((val & cluster_core_mask) ==
					cluster_core_mask),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d int st status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/*
 * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
 * Otherwise the device may stay in D3 state when we bind and unbind hpre
 * quickly, because the hardware-triggered FLR kicks in.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}

static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val &= HPRE_PREFETCH_ENABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
					 val, !(val & HPRE_PREFETCH_DISABLE),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val |= HPRE_PREFETCH_DISABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
					 val, !(val & HPRE_SVA_DISABLE_READY),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val |= HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val |= HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
	val |= HPRE_CLUSTER_DYN_CTL_EN;
	writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

	val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
	val |= HPRE_CORE_GATE_EN;
	writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}

static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val &= ~HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
	val &= ~HPRE_CLUSTER_DYN_CTL_EN;
	writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

	val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
	val &= ~HPRE_CORE_GATE_EN;
	writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}

static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val;
	int ret;

	/* disable dynamic clock gating before sram init */
	hpre_disable_clock_gate(qm);

	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);

	/* HPRE needs more time, so mask this timeout interrupt */
	val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
	val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
	writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);

	if (qm->ver >= QM_HW_V3)
		writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
		       qm->io_base + HPRE_TYPES_ENB);
	else
		writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);

	writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
	writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
	writel(0x0, qm->io_base + HPRE_INT_MASK);
	writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
	writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
	writel(0x0, qm->io_base + HPRE_ECC_BYPASS);

	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
	writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read rd channel timeout fail!\n");
		return -ETIMEDOUT;
	}

	ret = hpre_set_cluster(qm);
	if (ret)
		return -ETIMEDOUT;

	/* This setting is only needed by Kunpeng 920. */
	if (qm->ver == QM_HW_V2) {
		ret = hpre_cfg_by_dsm(qm);
		if (ret)
			return ret;

		disable_flr_of_bme(qm);
	}

	/* Config data buffer pasid needed by Kunpeng 920 */
	hpre_config_pasid(qm);

	hpre_enable_clock_gate(qm);

	return ret;
}

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	unsigned long offset;
	u8 clusters_num;
	int i;

	/* clear clusterX/cluster_ctrl */
	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	if (enable) {
		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
					   HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	} else {
		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	u32 ce, nfe;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* disable hpre hw error interrupts */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
	/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, false);
}

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 ce, nfe;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* clear HPRE hw error source if any */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* configure error type */
	writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

	/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, true);

	/* enable hpre hw error interrupts */
	writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
}

static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}

static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}

static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		hpre_cluster_inqry_write(file, val);
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};

static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}

static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

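	/*
	 * dfx_item->type doubles as the item's index in the hpre_dfx array
	 * (hpre_dfx_debug_init() sets dfx[i].type = i), so stepping back by
	 * HPRE_OVERTIME_THRHLD recovers the array base and lets us reset the
	 * paired over-threshold counter when a new threshold is written.
	 */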
	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");

static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	debugfs_create_file("regs", 0444, qm->debug.debug_root,
			    regset, &hpre_com_regs_fops);

	return 0;
}

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	u8 clusters_num;
	int i, ret;

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];
		regset->dev = dev;

		debugfs_create_file("regs", 0444, tmp_d, regset,
				    &hpre_cluster_regs_fops);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}

static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}

	if (qm->fun_type == QM_HW_PF && hpre_regs)
		debugfs_create_file("diff_regs", 0444, parent,
				    qm, &hpre_diff_regs_fops);
}

static int hpre_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hpre_debugfs_root);

	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
	ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init HPRE diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
		ret = hpre_ctrl_debug_init(qm);
		if (ret)
			goto failed_to_create;
	}

	hpre_dfx_debug_init(qm);

	return 0;

failed_to_create:
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	return ret;
}

static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));

	debugfs_remove_recursive(qm->debug.debug_root);
}

static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *hpre_cap;
	struct device *dev = &qm->pdev->dev;
	size_t i, size;

	size = ARRAY_SIZE(hpre_pre_store_caps);
	hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
	if (!hpre_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		hpre_cap[i].type = hpre_pre_store_caps[i];
		hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
				      hpre_pre_store_caps[i], qm->cap_ver);
	}

	if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
		dev_err(dev, "Device cluster num %u is out of range (driver supports at most %d)!\n",
			hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
		return -EINVAL;
	}

	qm->cap_tables.dev_cap_table = hpre_cap;

	return 0;
}

static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->mode = uacce_mode;
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init hpre qm configuration!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = hpre_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
	if (ret) {
		pci_err(pdev, "Failed to set hpre algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u8 clusters_num;
	int i, j, idx;

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
				    com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     hpre_com_dfx_regs[i].offset);

	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hpre_cluster_dfx_regs[j].offset);
		}
	}

	return 0;
}

static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	void __iomem *io_base;
	u8 clusters_num;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dump the last words of the debugging registers during controller reset */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n",
				 hpre_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			val = readl_relaxed(io_base +
					    hpre_cluster_dfx_regs[j].offset);
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			if (debug->last_words[idx] != val)
				pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n",
					 i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val);
		}
	}
}

static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_INT_STATUS);
}

static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	u32 nfe;

	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}

static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
	err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
	err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							  HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						      HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						       HPRE_RESET_MASK_CAP, qm->cap_ver);
	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
	err_info->acpi_rst = "HRST";
}

static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err		= hpre_log_hw_error,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.open_sva_prefetch	= hpre_open_sva_prefetch,
	.close_sva_prefetch	= hpre_close_sva_prefetch,
	.show_last_dfx_regs	= hpre_show_last_dfx_regs,
	.err_info_init		= hpre_err_info_init,
};

static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hpre_open_sva_prefetch(qm);

	qm->err_ini = &hpre_err_ini;
	qm->err_ini->err_info_init(qm);
	hisi_qm_dev_err_init(qm);
	ret = hpre_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int hpre_probe_init(struct hpre *hpre)
{
	u32 type_rate = HPRE_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
		/* Enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_err_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "Failed to init debugfs!\n");

	ret = hisi_qm_alg_register(qm, &hpre_devices);
	if (ret < 0) {
		pci_err(pdev, "failed to register algs to crypto!\n");
		goto err_with_qm_start;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_with_alg_register;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices);

err_with_qm_start:
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_err_init:
	hpre_show_last_regs_uninit(qm);
	hisi_qm_dev_err_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}

static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF) {
		hpre_cnt_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
		hpre_show_last_regs_uninit(qm);
		hisi_qm_dev_err_uninit(qm);
	}

	hisi_qm_uninit(qm);
}

static const struct dev_pm_ops hpre_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers hpre_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
	.driver.pm		= &hpre_pm_ops,
};

struct pci_driver *hisi_hpre_get_pf_driver(void)
{
	return &hpre_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");