// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware Random Number Generator support.
 * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
 *
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <asm/arch_timer.h>
#include <asm/cputype.h>

/* PCI device IDs */
#define	PCI_DEVID_CAVIUM_RNG_PF		0xA018
#define	PCI_DEVID_CAVIUM_RNG_VF		0xA033

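/*
 * RNM health status CSR in the PF BAR, as interpreted by check_rng_health()
 * below: bit 0 flags a startup health test failure, bits [63:1] hold twice
 * the number of coprocessor cycles since the last runtime failure.
 */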
#define HEALTH_STATUS_REG		0x38

/* RST device info */
#define PCI_DEVICE_ID_RST_OTX2		0xA085
#define RST_BOOT_REG			0x1600ULL
#define CLOCK_BASE_RATE			50000000ULL
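/* Coprocessor clock rate = RST_BOOT[PNR_MUL] (bits 38:33) * 50 MHz */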
#define MSEC_TO_NSEC(x)			((x) * 1000000)

struct cavium_rng {
	struct hwrng ops;
	void __iomem *result;
	void __iomem *pf_regbase;
	struct pci_dev *pdev;
	u64  clock_rate;
	u64  prev_error;
	u64  prev_time;
};

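/*
 * Identify first-generation OcteonTx/ThunderX silicon (CN88xx/81xx/83xx)
 * from the CPU MIDR. These parts do not implement the RNM health status
 * CSR, so health checking is skipped for them.
 */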
static inline bool is_octeontx(struct pci_dev *pdev)
{
	if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX,
				    MIDR_CPU_VAR_REV(0, 0),
				    MIDR_CPU_VAR_REV(3, 0)) ||
	    midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX,
				    MIDR_CPU_VAR_REV(0, 0),
				    MIDR_CPU_VAR_REV(3, 0)) ||
	    midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX,
				    MIDR_CPU_VAR_REV(0, 0),
				    MIDR_CPU_VAR_REV(3, 0)))
		return true;

	return false;
}

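/*
 * Derive the coprocessor clock rate from the OcteonTx2 RST block: map its
 * BAR0, read RST_BOOT and multiply the PNR_MUL field by 50 MHz. If the RST
 * device cannot be found or mapped, fall back to the assumed 800 MHz default.
 */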
static u64 rng_get_coprocessor_clkrate(void)
{
	u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800 MHz as default */
	struct pci_dev *pdev;
	void __iomem *base;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVICE_ID_RST_OTX2, NULL);
	if (!pdev)
		goto error;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto error_put_pdev;

	/* RST: PNR_MUL * 50 MHz gives the clock rate */
	ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F);

	iounmap(base);

error_put_pdev:
	pci_dev_put(pdev);

error:
	return ret;
}

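/*
 * Health check heuristic: fail with -EIO if the startup health test failed,
 * or if the runtime failure reported by hardware is more recent than the one
 * recorded on the previous check (i.e. a second failure has occurred), which
 * is treated as a persistent entropy problem.
 */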
static int check_rng_health(struct cavium_rng *rng)
{
	u64 cur_err, cur_time;
	u64 status, cycles;
	u64 time_elapsed;

	/* Skip checking health for OcteonTx */
	if (!rng->pf_regbase)
		return 0;

	status = readq(rng->pf_regbase + HEALTH_STATUS_REG);
	if (status & BIT_ULL(0)) {
		dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n");
		return -EIO;
	}

	cycles = status >> 1;
	if (!cycles)
		return 0;

	cur_time = arch_timer_read_counter();

	/* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE]
	 * Number of coprocessor cycles times 2 since the last failure.
	 * This field doesn't get cleared/updated until another failure.
	 */
	cycles = cycles / 2;
	cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */

	/* Ignore errors that happened a long time ago, these
	 * are most likely false positive errors.
	 */
	if (cur_err > MSEC_TO_NSEC(10)) {
		rng->prev_error = 0;
		rng->prev_time = 0;
		return 0;
	}

	if (rng->prev_error) {
		/* Calculate time elapsed since the last error.
		 * One tick of CNTVCT is 10ns, since it runs at 100 MHz.
		 */
		time_elapsed = (cur_time - rng->prev_time) * 10;
		time_elapsed += rng->prev_error;

		/* Check whether this is a new error or the old one seen
		 * again. A new error means there is a persistent issue
		 * with entropy, so declare a hardware failure.
		 */
		if (cur_err < time_elapsed) {
			dev_err(&rng->pdev->dev, "HWRNG failure detected\n");
			rng->prev_error = cur_err;
			rng->prev_time = cur_time;
			return -EIO;
		}
	}

	rng->prev_error = cur_err;
	rng->prev_time = cur_time;
	return 0;
}

/* Read data from the RNG unit */
static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
{
	struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
	unsigned int size = max;
	int err = 0;

	err = check_rng_health(p);
	if (err)
		return err;

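	/* Drain the request: 64-bit reads first, then byte reads for the tail */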
	while (size >= 8) {
		*((u64 *)dat) = readq(p->result);
		size -= 8;
		dat += 8;
	}
	while (size > 0) {
		*((u8 *)dat) = readb(p->result);
		size--;
		dat++;
	}
	return max;
}

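/*
 * The VF's BAR0 provides the random data output; the health status CSR lives
 * in the RNG PF's BAR0. Look up the PF device and map its CSR region directly
 * (the mapping is released in cavium_rng_remove_vf()).
 */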
static int cavium_map_pf_regs(struct cavium_rng *rng)
{
	struct pci_dev *pdev;

	/* Health status is not supported on OcteonTx (CN8xxx), skip mapping PF CSRs */
	if (is_octeontx(rng->pdev)) {
		rng->pf_regbase = NULL;
		return 0;
	}

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_CAVIUM_RNG_PF, NULL);
	if (!pdev) {
		dev_err(&rng->pdev->dev, "Cannot find RNG PF device\n");
		return -EIO;
	}

	rng->pf_regbase = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rng->pf_regbase) {
		dev_err(&pdev->dev, "Failed to map PF CSR region\n");
		pci_dev_put(pdev);
		return -ENOMEM;
	}

	pci_dev_put(pdev);

	/* Get co-processor clock rate */
	rng->clock_rate = rng_get_coprocessor_clkrate();

	return 0;
}

/* Map Cavium RNG to an HWRNG object */
static int cavium_rng_probe_vf(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct cavium_rng *rng;
	int ret;

	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
	if (!rng)
		return -ENOMEM;

	rng->pdev = pdev;

	/* Map the RNG result region (BAR0) */
	rng->result = pcim_iomap(pdev, 0, 0);
	if (!rng->result) {
		dev_err(&pdev->dev, "Failed to iomap RNG result region\n");
		return -ENOMEM;
	}

	rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
				       "cavium-rng-%s", dev_name(&pdev->dev));
	if (!rng->ops.name)
		return -ENOMEM;

	rng->ops.read    = cavium_rng_read;
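	/* Advertise the output as high-quality entropy to the hwrng core */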
	rng->ops.quality = 1000;

	pci_set_drvdata(pdev, rng);

	/* Health status is available only at PF, hence map PF registers. */
	ret = cavium_map_pf_regs(rng);
	if (ret)
		return ret;

	ret = devm_hwrng_register(&pdev->dev, &rng->ops);
	if (ret) {
		dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
		return ret;
	}

	return 0;
}

/* Remove the VF */
static void cavium_rng_remove_vf(struct pci_dev *pdev)
{
	struct cavium_rng *rng;

	rng = pci_get_drvdata(pdev);
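	/*
	 * Release the PF CSR mapping set up with ioremap() in
	 * cavium_map_pf_regs(); it is not device-managed. On OcteonTx
	 * pf_regbase was left NULL, which iounmap() handles as a no-op.
	 */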
	iounmap(rng->pf_regbase);
}

static const struct pci_device_id cavium_rng_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);

static struct pci_driver cavium_rng_vf_driver = {
	.name		= "cavium_rng_vf",
	.id_table	= cavium_rng_vf_id_table,
	.probe		= cavium_rng_probe_vf,
	.remove		= cavium_rng_remove_vf,
};
module_pci_driver(cavium_rng_vf_driver);

MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
MODULE_LICENSE("GPL v2");