// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/devcoredump.h>
#include <linux/moduleparam.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

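/*
 * Produce a devcoredump for the first hang event only, until manually
 * re-armed through the dump_core module parameter.
 */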
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

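/*
 * Iterator state while the dump file is assembled: 'hdr' points at the next
 * object header to fill in, 'data' at the next payload write position.
 */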
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};

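/* MMIO registers captured into the ETDUMP_BUF_REG section of the dump. */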
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};

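/*
 * Fill in the next object header for a payload ending at 'data_end' and
 * advance the iterator past that payload.
 */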
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += le32_to_cpu(hdr->file_size);
}

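/* Snapshot the register list above into an ETDUMP_BUF_REG object. */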
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
		reg->value = cpu_to_le32(gpu_read(gpu, etnaviv_dump_registers[i]));
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}

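/* Dump the GPU MMU state into an ETDUMP_BUF_MMU object. */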
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu, size_t mmu_size)
{
	etnaviv_iommu_dump(gpu->mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}

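/* Copy a memory region into the dump, recording its GPU virtual address. */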
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}

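/*
 * Capture the GPU state after a hang: registers, MMU state, the kernel ring
 * buffer, the command buffers of in-flight jobs and all actively mapped
 * buffer objects, and hand the resulting file to devcoredump.
 */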
void etnaviv_core_dump(struct etnaviv_gpu *gpu)
{
	struct core_dump_iterator iter;
	struct etnaviv_vram_mapping *vram;
	struct etnaviv_gem_object *obj;
	struct etnaviv_gem_submit *submit;
	struct drm_sched_job *s_job;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);

	/* We always dump registers, mmu, ring and end marker */
	n_obj = 4;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size;

	/* Add in the active command buffers */
	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		file_size += submit->cmdbuf.size;
		n_obj++;
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Add in the active buffer objects */
	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		if (!vram->use)
			continue;

		obj = vram->object;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			       PAGE_KERNEL);
	if (!iter.start) {
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer));

	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
				      submit->cmdbuf.vaddr, submit->cmdbuf.size,
				      etnaviv_cmdbuf_get_va(&submit->cmdbuf));
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		struct page **pages;
		void *vaddr;

		if (vram->use == 0)
			continue;

		obj = vram->object;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
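		/*
		 * Record where this BO's page list starts within the bomap
		 * object, then append one physical address per page.
		 */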
		if (!IS_ERR(pages)) {
			int j;

			iter.hdr->data[0] = cpu_to_le32(bomap - bomap_start);

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

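	/* devcoredump takes ownership of the vmalloc'ed buffer and frees it */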
	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}