/*
 * Copyright (c) 2013-2014, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Based on drivers/misc/eeprom/sunxi_sid.c
 */

#include <linux/device.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/random.h>

#include <soc/tegra/fuse.h>

#include "fuse.h"

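/*
 * Register layout used by this driver: the fuse array starts FUSE_BEGIN
 * bytes into the block and spans FUSE_SIZE bytes; FUSE_UID_LOW/HIGH are
 * the offsets of the two unique-ID words within that array.
 */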
#define FUSE_BEGIN	0x100
#define FUSE_SIZE	0x1f8
#define FUSE_UID_LOW	0x08
#define FUSE_UID_HIGH	0x0c

static phys_addr_t fuse_phys;
static struct clk *fuse_clk;
static void __iomem __initdata *fuse_base;

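/*
 * State for reading fuses through the APB DMA engine: one fuse word at a
 * time is transferred into apb_buffer by apb_dma_chan, with apb_dma_lock
 * serializing readers and apb_dma_wait signalling completion.
 */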
static DEFINE_MUTEX(apb_dma_lock);
static DECLARE_COMPLETION(apb_dma_wait);
static struct dma_chan *apb_dma_chan;
static struct dma_slave_config dma_sconfig;
static u32 *apb_buffer;
static dma_addr_t apb_buffer_phys;

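/* DMA completion callback: wake up the reader blocked in tegra20_fuse_readl() */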
static void apb_dma_complete(void *args)
{
	complete(&apb_dma_wait);
}

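/*
 * Read one 32-bit word at @offset within the fuse array. The value is
 * fetched by a single DMA transfer while the fuse clock is enabled; on
 * timeout the transfer is cancelled and 0 is returned.
 */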
static u32 tegra20_fuse_readl(const unsigned int offset)
{
	int ret;
	u32 val = 0;
	struct dma_async_tx_descriptor *dma_desc;
	unsigned long time_left;

	mutex_lock(&apb_dma_lock);

	dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
	ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
	if (ret)
		goto out;

	dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
			sizeof(u32), DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		goto out;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	reinit_completion(&apb_dma_wait);

	clk_prepare_enable(fuse_clk);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(apb_dma_chan);
	time_left = wait_for_completion_timeout(&apb_dma_wait,
						msecs_to_jiffies(50));

	if (WARN(time_left == 0, "apb read dma timed out"))
		dmaengine_terminate_all(apb_dma_chan);
	else
		val = *apb_buffer;

	clk_disable_unprepare(fuse_clk);
out:
	mutex_unlock(&apb_dma_lock);

	return val;
}

static const struct of_device_id tegra20_fuse_of_match[] = {
	{ .compatible = "nvidia,tegra20-efuse" },
	{},
};

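/*
 * Request a slave DMA channel and a coherent bounce buffer for fuse reads
 * and pre-fill the parts of the slave configuration that never change.
 * Returns -EPROBE_DEFER if no channel is available yet, so the probe is
 * retried once a DMA driver has registered.
 */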
static int apb_dma_init(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!apb_dma_chan)
		return -EPROBE_DEFER;

	apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys,
					GFP_KERNEL);
	if (!apb_buffer) {
		dma_release_channel(apb_dma_chan);
		return -ENOMEM;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

	return 0;
}

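/*
 * Look up the fuse clock and register base, set up the DMA machinery and
 * expose the fuse array through sysfs via tegra_fuse_create_sysfs().
 */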
static int tegra20_fuse_probe(struct platform_device *pdev)
{
	struct resource *res;
	int err;

	fuse_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(fuse_clk)) {
		dev_err(&pdev->dev, "missing clock\n");
		return PTR_ERR(fuse_clk);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	fuse_phys = res->start;

	err = apb_dma_init();
	if (err)
		return err;

	if (tegra_fuse_create_sysfs(&pdev->dev, FUSE_SIZE, tegra20_fuse_readl))
		return -ENODEV;

	dev_dbg(&pdev->dev, "loaded\n");

	return 0;
}

static struct platform_driver tegra20_fuse_driver = {
	.probe = tegra20_fuse_probe,
	.driver = {
		.name = "tegra20_fuse",
		.of_match_table = tegra20_fuse_of_match,
	}
};

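/*
 * Register at postcore_initcall time rather than as a regular module init
 * so the driver is bound earlier than ordinary device initcalls.
 */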
static int __init tegra20_fuse_init(void)
{
	return platform_driver_register(&tegra20_fuse_driver);
}
postcore_initcall(tegra20_fuse_init);

/* Early boot code. This code is called before the devices are created */

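/* Read a fuse word through the temporary mapping set up by tegra20_init_fuse_early() */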
u32 __init tegra20_fuse_early(const unsigned int offset)
{
	return readl_relaxed(fuse_base + FUSE_BEGIN + offset);
}

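/*
 * Return true if the given spare fuse bit is set; each spare bit occupies
 * its own word, starting 0x100 bytes into the fuse array.
 */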
bool __init tegra20_spare_fuse_early(int spare_bit)
{
	u32 offset = spare_bit * 4;
	bool value;

	value = tegra20_fuse_early(offset + 0x100);

	return value;
}

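/*
 * Mix chip-specific identifiers (SKU, straps, chip ID, process and speedo
 * IDs, unique ID) into the entropy pool as device randomness.
 */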
static void __init tegra20_fuse_add_randomness(void)
{
	u32 randomness[7];

	randomness[0] = tegra_sku_info.sku_id;
	randomness[1] = tegra_read_straps();
	randomness[2] = tegra_read_chipid();
	randomness[3] = tegra_sku_info.cpu_process_id << 16;
	randomness[3] |= tegra_sku_info.core_process_id;
	randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
	randomness[4] |= tegra_sku_info.soc_speedo_id;
	randomness[5] = tegra20_fuse_early(FUSE_UID_LOW);
	randomness[6] = tegra20_fuse_early(FUSE_UID_HIGH);

	add_device_randomness(randomness, sizeof(randomness));
}

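/*
 * Map the fuse block, derive revision, SKU and speedo information, seed
 * the entropy pool and drop the temporary mapping again; later reads go
 * through the DMA-based tegra20_fuse_readl() instead.
 */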
void __init tegra20_init_fuse_early(void)
{
	fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);

	tegra_init_revision();
	tegra20_init_speedo_data(&tegra_sku_info);
	tegra20_fuse_add_randomness();

	iounmap(fuse_base);
}
217