// SPDX-License-Identifier: GPL-2.0
/*
 * NVM helpers
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_1M
#define NVM_DATA_DWORDS		16

/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID			0x05
#define INTEL_NVM_VERSION		0x08
#define INTEL_NVM_CSS			0x10
#define INTEL_NVM_FLASH_SIZE		0x45

/* ASMedia specific NVM offsets */
#define ASMEDIA_NVM_DATE		0x1c
#define ASMEDIA_NVM_VERSION		0x28

static DEFINE_IDA(nvm_ida);

/**
 * struct tb_nvm_vendor_ops - Vendor specific NVM operations
 * @read_version: Reads out NVM version from the flash
 * @validate: Validates the NVM image before update (optional)
 * @write_headers: Writes headers before the rest of the image (optional)
 */
struct tb_nvm_vendor_ops {
	int (*read_version)(struct tb_nvm *nvm);
	int (*validate)(struct tb_nvm *nvm);
	int (*write_headers)(struct tb_nvm *nvm);
};

/**
 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
 * @vendor: Vendor ID
 * @vops: Vendor specific NVM operations
 *
 * Maps vendor ID to NVM vendor operations. If there is no mapping then
 * NVM firmware upgrade is disabled for the device.
 */
struct tb_nvm_vendor {
	u16 vendor;
	const struct tb_nvm_vendor_ops *vops;
};

static int intel_switch_nvm_version(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u32 val, nvm_size, hdr_size;
	int ret;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (sw->safe_mode)
		return 0;

	ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;

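	/*
	 * The low three bits of the flash size register encode the total
	 * flash size: for example (val & 7) == 2 gives (1M << 2) / 8 =
	 * 512K (illustrative value). Half of what remains after the
	 * generation dependent header is the active NVM region; the
	 * other half holds the non-active image.
	 */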
	hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - hdr_size) / 2;

	ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;
	nvm->active_size = nvm_size;

	return 0;
}

static int intel_switch_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	unsigned int image_size, hdr_size;
	u16 ds_size, device_id;
	u8 *buf = nvm->buf;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (sw->safe_mode)
		return 0;

	/*
	 * Make sure the device ID in the image matches the one
	 * we read from the switch config space.
	 */
	device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device_id != sw->config.device_id)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}

static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	if (sw->generation < 3) {
		int ret;

		/* Write CSS headers first */
		ret = dma_port_flash_write(sw->dma_port,
			DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
			DMA_PORT_CSS_MAX_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
	.read_version = intel_switch_nvm_version,
	.validate = intel_switch_nvm_validate,
	.write_headers = intel_switch_nvm_write_headers,
};

static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u32 val;
	int ret;

	ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

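	/*
	 * The three meaningful bytes of the version (and of the date
	 * below) are stored on the flash in reverse order, so reassemble
	 * them with the byte order swapped.
	 */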
	nvm->major = (val << 16) & 0xff0000;
	nvm->major |= val & 0x00ff00;
	nvm->major |= (val >> 16) & 0x0000ff;

	ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->minor = (val << 16) & 0xff0000;
	nvm->minor |= val & 0x00ff00;
	nvm->minor |= (val >> 16) & 0x0000ff;

	/* ASMedia NVM size is fixed to 512k */
	nvm->active_size = SZ_512K;

	return 0;
}

static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
	.read_version = asmedia_switch_nvm_version,
};

/* Router vendor NVM support table */
static const struct tb_nvm_vendor switch_nvm_vendors[] = {
	{ 0x174c, &asmedia_switch_nvm_ops },
	{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
	{ 0x8087, &intel_switch_nvm_ops },
};

static int intel_retimer_nvm_version(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	u32 val, nvm_size;
	int ret;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;

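	/* Same flash size encoding as in intel_switch_nvm_version() above */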
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;
	nvm->active_size = nvm_size;

	return 0;
}

static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	unsigned int image_size, hdr_size;
	u8 *buf = nvm->buf;
	u16 ds_size, device;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}

static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
	.read_version = intel_retimer_nvm_version,
	.validate = intel_retimer_nvm_validate,
};

/* Retimer vendor NVM support table */
static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
	{ 0x8087, &intel_retimer_nvm_ops },
};

/**
 * tb_nvm_alloc() - Allocate new NVM structure
 * @dev: Device owning the NVM
 *
 * Allocates a new NVM structure with a unique @id and returns it. In
 * case of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP
 * if the NVM format of the @dev is not known by the kernel.
 */
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
	const struct tb_nvm_vendor_ops *vops = NULL;
	struct tb_nvm *nvm;
	int ret, i;

	if (tb_is_switch(dev)) {
		const struct tb_switch *sw = tb_to_switch(dev);

		for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];

			if (v->vendor == sw->config.vendor_id) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
				  sw->config.vendor_id);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else if (tb_is_retimer(dev)) {
		const struct tb_retimer *rt = tb_to_retimer(dev);

		for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];

			if (v->vendor == rt->vendor) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
				rt->vendor);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else {
		return ERR_PTR(-EOPNOTSUPP);
	}

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(nvm);
		return ERR_PTR(ret);
	}

	nvm->id = ret;
	nvm->dev = dev;
	nvm->vops = vops;

	return nvm;
}

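/*
 * A typical caller flow (a sketch based on how the router and retimer
 * code are expected to use these helpers; error handling omitted):
 *
 *	nvm = tb_nvm_alloc(&sw->dev);
 *	if (!IS_ERR(nvm)) {
 *		tb_nvm_read_version(nvm);
 *		tb_nvm_add_active(nvm, nvm_read);
 *		tb_nvm_add_non_active(nvm, nvm_write);
 *	}
 *
 * Here nvm_read/nvm_write are the caller's NVMem callbacks (the names
 * are illustrative). tb_nvm_free() releases everything at teardown.
 */
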
/**
 * tb_nvm_read_version() - Read and populate NVM version
 * @nvm: NVM structure
 *
 * Uses vendor specific means to read out and fill in the existing
 * active NVM version. Returns %0 in case of success and negative errno
 * otherwise.
 */
int tb_nvm_read_version(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	if (vops && vops->read_version)
		return vops->read_version(nvm);

	return -EOPNOTSUPP;
}

/**
 * tb_nvm_validate() - Validate new NVM image
 * @nvm: NVM structure
 *
 * Runs vendor specific validation over the new NVM image and if all
 * checks pass returns %0. As a side effect updates @nvm->buf_data_start
 * and @nvm->buf_data_size fields to match the actual data to be written
 * to the NVM.
 *
 * If the validation does not pass then returns negative errno.
 */
int tb_nvm_validate(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;
	unsigned int image_size;
	u8 *buf = nvm->buf;

	if (!buf)
		return -EINVAL;
	if (!vops)
		return -EOPNOTSUPP;

	/* Just do basic image size checks */
	image_size = nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * Set the default data start in the buffer. The validate method
	 * below can change this if needed.
	 */
	nvm->buf_data_start = buf;

	return vops->validate ? vops->validate(nvm) : 0;
}

/**
 * tb_nvm_write_headers() - Write headers before the rest of the image
 * @nvm: NVM structure
 *
 * If the vendor NVM format requires writing headers before the rest of
 * the image, this function does that. Can be called even if the device
 * does not need this.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_nvm_write_headers(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	return vops->write_headers ? vops->write_headers(nvm) : 0;
}

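/*
 * Sketch of the update sequence built from the helpers above (this is
 * how callers in this driver are expected to combine them; error
 * handling omitted):
 *
 *	tb_nvm_write_buf(nvm, ...);	// cache the new image
 *	tb_nvm_validate(nvm);		// basic and vendor checks
 *	tb_nvm_write_headers(nvm);	// vendor headers, if any
 *
 * After this the caller flashes @nvm->buf_data_size bytes starting
 * from @nvm->buf_data_start.
 */
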
/**
 * tb_nvm_add_active() - Adds active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_read: Pointer to the function to read the NVM (passed directly to the
 *	      NVMem device)
 *
 * Registers a new active NVMem device for @nvm. The @reg_read is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_read is the @nvm structure.
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_active";
	config.reg_read = reg_read;
	config.read_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = nvm->active_size;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->active = nvmem;
	return 0;
}

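/*
 * A minimal @reg_read callback could look like this (a sketch; the
 * name and the router read path are illustrative). NVMem passes
 * @config.priv, i.e. the tb_nvm, back as @priv:
 *
 *	static int nvm_read(void *priv, unsigned int offset, void *val,
 *			    size_t bytes)
 *	{
 *		struct tb_nvm *nvm = priv;
 *		struct tb_switch *sw = tb_to_switch(nvm->dev);
 *
 *		return tb_switch_nvm_read(sw, offset, val, bytes);
 *	}
 */
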
/**
 * tb_nvm_write_buf() - Write data to @nvm buffer
 * @nvm: NVM structure
 * @offset: Offset where to write the data
 * @val: Data buffer to write
 * @bytes: Number of bytes to write
 *
 * Helper function to cache the new NVM image before it is actually
 * written to the flash. Copies @bytes from @val to @nvm->buf starting
 * from @offset.
 */
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes)
{
	if (!nvm->buf) {
		nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!nvm->buf)
			return -ENOMEM;
	}

	nvm->flushed = false;
	nvm->buf_data_size = offset + bytes;
	memcpy(nvm->buf + offset, val, bytes);
	return 0;
}

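/*
 * The non-active @reg_write callback typically just caches the bytes
 * through this helper, for example (a sketch; the name is illustrative):
 *
 *	static int nvm_write(void *priv, unsigned int offset, void *val,
 *			     size_t bytes)
 *	{
 *		struct tb_nvm *nvm = priv;
 *
 *		return tb_nvm_write_buf(nvm, offset, val, bytes);
 *	}
 */
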
/**
 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_write: Pointer to the function to write the NVM (passed directly
 *	       to the NVMem device)
 *
 * Registers a new non-active NVMem device for @nvm. The @reg_write is
 * called directly from NVMem so it must handle possible concurrent
 * access if needed. The first parameter passed to @reg_write is the
 * @nvm structure. The size of the NVMem device is set to %NVM_MAX_SIZE.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_non_active";
	config.reg_write = reg_write;
	config.root_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = NVM_MAX_SIZE;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->non_active = nvmem;
	return 0;
}

/**
 * tb_nvm_free() - Release NVM and its resources
 * @nvm: NVM structure to release
 *
 * Releases NVM and the NVMem devices if they were registered.
 */
void tb_nvm_free(struct tb_nvm *nvm)
{
	if (nvm) {
		nvmem_unregister(nvm->non_active);
		nvmem_unregister(nvm->active);
		vfree(nvm->buf);
		ida_simple_remove(&nvm_ida, nvm->id);
	}
	kfree(nvm);
}

/**
 * tb_nvm_read_data() - Read data from NVM
 * @address: Start address on the flash
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if a block read fails
 * @read_block: Function that reads a block from the flash
 * @read_block_data: Data passed to @read_block
 *
 * This is a generic function that reads data from NVM or an NVM-like
 * device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data)
{
	do {
		unsigned int dwaddress, dwords, offset;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(read_block_data, dwaddress, data, dwords);
		if (ret) {
			if (ret != -ENODEV && retries--)
				continue;
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

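/*
 * Example (a sketch, names illustrative) of wiring a block reader to
 * this helper. @read_block receives a dword address and a dword count,
 * and data moves in chunks of at most NVM_DATA_DWORDS dwords:
 *
 *	static int read_block(void *data, unsigned int dwaddress,
 *			      void *buf, size_t dwords)
 *	{
 *		struct tb_switch *sw = data;
 *
 *		// issue the transport specific read of @dwords dwords
 *		// starting at @dwaddress into @buf
 *		...
 *	}
 *
 *	ret = tb_nvm_read_data(address, buf, size, 2, read_block, sw);
 */
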
/**
 * tb_nvm_write_data() - Write data to NVM
 * @address: Start address on the flash
 * @buf: Buffer where the data is copied from
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if the block write fails
 * @write_block: Function that writes a block to the flash
 * @write_block_data: Data passed to @write_block
 *
 * This is a generic function that writes data to NVM or an NVM-like
 * device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_block,
		      void *write_block_data)
{
	do {
		unsigned int offset, dwaddress;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);

		memcpy(data + offset, buf, nbytes);

		dwaddress = address / 4;
		ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

void tb_nvm_exit(void)
{
	ida_destroy(&nvm_ida);
}