// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

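/* Layout of an MFW TLV header within the shared-memory request buffer,
 * as implied by the accessor macros below and the offset arithmetic used
 * when walking the buffer:
 *   byte 0 - TLV type
 *   byte 1 - TLV value length, in dwords
 *   byte 3 - TLV flags
 * The value bytes follow the header.
 */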
#define TLV_TYPE(p)     ((p)[0])
#define TLV_LENGTH(p)   ((p)[1])
#define TLV_FLAGS(p)    ((p)[3])

#define QED_TLV_DATA_MAX (14)
struct qed_tlv_parsed_buf {
	/* To be filled with the address to set in the Value field */
	void *p_val;

	/* To be used internally in case the value has to be modified */
	u8 data[QED_TLV_DATA_MAX];
};

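/* Map a TLV type to the group(s) whose driver data is needed to fill it.
 * Returns -EINVAL for TLV types this driver does not recognize.
 */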
static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
{
	switch (tlv_type) {
	case DRV_TLV_FEATURE_FLAGS:
	case DRV_TLV_LOCAL_ADMIN_ADDR:
	case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
	case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
	case DRV_TLV_OS_DRIVER_STATES:
	case DRV_TLV_PXE_BOOT_PROGRESS:
	case DRV_TLV_RX_FRAMES_RECEIVED:
	case DRV_TLV_RX_BYTES_RECEIVED:
	case DRV_TLV_TX_FRAMES_SENT:
	case DRV_TLV_TX_BYTES_SENT:
	case DRV_TLV_NPIV_ENABLED:
	case DRV_TLV_PCIE_BUS_RX_UTILIZATION:
	case DRV_TLV_PCIE_BUS_TX_UTILIZATION:
	case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION:
	case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED:
	case DRV_TLV_NCSI_RX_BYTES_RECEIVED:
	case DRV_TLV_NCSI_TX_BYTES_SENT:
		*tlv_group |= QED_MFW_TLV_GENERIC;
		break;
	case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
	case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
	case DRV_TLV_PROMISCUOUS_MODE:
	case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
	case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
	case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
	case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
	case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
	case DRV_TLV_IOV_OFFLOAD:
	case DRV_TLV_TX_QUEUES_EMPTY:
	case DRV_TLV_RX_QUEUES_EMPTY:
	case DRV_TLV_TX_QUEUES_FULL:
	case DRV_TLV_RX_QUEUES_FULL:
		*tlv_group |= QED_MFW_TLV_ETH;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Returns the size of the data buffer, or -1 if the TLV value is not available. */
static int
qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
			  struct qed_mfw_tlv_generic *p_drv_buf,
			  struct qed_tlv_parsed_buf *p_buf)
{
	switch (p_tlv->tlv_type) {
	case DRV_TLV_FEATURE_FLAGS:
		if (p_drv_buf->flags.b_set) {
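			/* Pack the feature flags: bit 0 reports IPv4
			 * checksum offload, bit 1 reports LSO support.
			 */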
			memset(p_buf->data, 0, sizeof(p_buf->data));
			p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ?
			    1 : 0;
			p_buf->data[0] |= (p_drv_buf->flags.lso_supported ?
					   1 : 0) << 1;
			p_buf->p_val = p_buf->data;
			return QED_MFW_TLV_FLAGS_SIZE;
		}
		break;

	case DRV_TLV_LOCAL_ADMIN_ADDR:
	case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
	case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
		{
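			/* The three MAC TLV types are contiguous, so the
			 * offset from DRV_TLV_LOCAL_ADMIN_ADDR indexes the
			 * mac[]/mac_set[] arrays directly.
			 */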
			int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR;

			if (p_drv_buf->mac_set[idx]) {
				p_buf->p_val = p_drv_buf->mac[idx];
				return ETH_ALEN;
			}
			break;
		}

	case DRV_TLV_RX_FRAMES_RECEIVED:
		if (p_drv_buf->rx_frames_set) {
			p_buf->p_val = &p_drv_buf->rx_frames;
			return sizeof(p_drv_buf->rx_frames);
		}
		break;
	case DRV_TLV_RX_BYTES_RECEIVED:
		if (p_drv_buf->rx_bytes_set) {
			p_buf->p_val = &p_drv_buf->rx_bytes;
			return sizeof(p_drv_buf->rx_bytes);
		}
		break;
	case DRV_TLV_TX_FRAMES_SENT:
		if (p_drv_buf->tx_frames_set) {
			p_buf->p_val = &p_drv_buf->tx_frames;
			return sizeof(p_drv_buf->tx_frames);
		}
		break;
	case DRV_TLV_TX_BYTES_SENT:
		if (p_drv_buf->tx_bytes_set) {
			p_buf->p_val = &p_drv_buf->tx_bytes;
			return sizeof(p_drv_buf->tx_bytes);
		}
		break;
	default:
		break;
	}

	return -1;
}

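/* Returns the size of the data buffer, or -1 if the TLV value is not available. */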
static int
qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
			  struct qed_mfw_tlv_eth *p_drv_buf,
			  struct qed_tlv_parsed_buf *p_buf)
{
	switch (p_tlv->tlv_type) {
	case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
		if (p_drv_buf->lso_maxoff_size_set) {
			p_buf->p_val = &p_drv_buf->lso_maxoff_size;
			return sizeof(p_drv_buf->lso_maxoff_size);
		}
		break;
	case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
		if (p_drv_buf->lso_minseg_size_set) {
			p_buf->p_val = &p_drv_buf->lso_minseg_size;
			return sizeof(p_drv_buf->lso_minseg_size);
		}
		break;
	case DRV_TLV_PROMISCUOUS_MODE:
		if (p_drv_buf->prom_mode_set) {
			p_buf->p_val = &p_drv_buf->prom_mode;
			return sizeof(p_drv_buf->prom_mode);
		}
		break;
	case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
		if (p_drv_buf->tx_descr_size_set) {
			p_buf->p_val = &p_drv_buf->tx_descr_size;
			return sizeof(p_drv_buf->tx_descr_size);
		}
		break;
	case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
		if (p_drv_buf->rx_descr_size_set) {
			p_buf->p_val = &p_drv_buf->rx_descr_size;
			return sizeof(p_drv_buf->rx_descr_size);
		}
		break;
	case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
		if (p_drv_buf->netq_count_set) {
			p_buf->p_val = &p_drv_buf->netq_count;
			return sizeof(p_drv_buf->netq_count);
		}
		break;
	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
		if (p_drv_buf->tcp4_offloads_set) {
			p_buf->p_val = &p_drv_buf->tcp4_offloads;
			return sizeof(p_drv_buf->tcp4_offloads);
		}
		break;
	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
		if (p_drv_buf->tcp6_offloads_set) {
			p_buf->p_val = &p_drv_buf->tcp6_offloads;
			return sizeof(p_drv_buf->tcp6_offloads);
		}
		break;
	case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
		if (p_drv_buf->tx_descr_qdepth_set) {
			p_buf->p_val = &p_drv_buf->tx_descr_qdepth;
			return sizeof(p_drv_buf->tx_descr_qdepth);
		}
		break;
	case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
		if (p_drv_buf->rx_descr_qdepth_set) {
			p_buf->p_val = &p_drv_buf->rx_descr_qdepth;
			return sizeof(p_drv_buf->rx_descr_qdepth);
		}
		break;
	case DRV_TLV_IOV_OFFLOAD:
		if (p_drv_buf->iov_offload_set) {
			p_buf->p_val = &p_drv_buf->iov_offload;
			return sizeof(p_drv_buf->iov_offload);
		}
		break;
	case DRV_TLV_TX_QUEUES_EMPTY:
		if (p_drv_buf->txqs_empty_set) {
			p_buf->p_val = &p_drv_buf->txqs_empty;
			return sizeof(p_drv_buf->txqs_empty);
		}
		break;
	case DRV_TLV_RX_QUEUES_EMPTY:
		if (p_drv_buf->rxqs_empty_set) {
			p_buf->p_val = &p_drv_buf->rxqs_empty;
			return sizeof(p_drv_buf->rxqs_empty);
		}
		break;
	case DRV_TLV_TX_QUEUES_FULL:
		if (p_drv_buf->num_txqs_full_set) {
			p_buf->p_val = &p_drv_buf->num_txqs_full;
			return sizeof(p_drv_buf->num_txqs_full);
		}
		break;
	case DRV_TLV_RX_QUEUES_FULL:
		if (p_drv_buf->num_rxqs_full_set) {
			p_buf->p_val = &p_drv_buf->num_rxqs_full;
			return sizeof(p_drv_buf->num_rxqs_full);
		}
		break;
	default:
		break;
	}

	return -1;
}

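/* Walk the TLV request image and fill in the value of every TLV belonging
 * to @tlv_group, marking each updated TLV with QED_DRV_TLV_FLAGS_CHANGED.
 */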
static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
			       u8 tlv_group, u8 *p_mfw_buf, u32 size)
{
	union qed_mfw_tlv_data *p_tlv_data;
	struct qed_tlv_parsed_buf buffer;
	struct qed_drv_tlv_hdr tlv;
	int len = 0;
	u32 offset;
	u8 *p_tlv;

	p_tlv_data = vzalloc(sizeof(*p_tlv_data));
	if (!p_tlv_data)
		return -ENOMEM;

	if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) {
		vfree(p_tlv_data);
		return -EINVAL;
	}

	memset(&tlv, 0, sizeof(tlv));
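	/* Each iteration advances past the TLV header plus the value
	 * length, which the header expresses in dwords.
	 */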
	for (offset = 0; offset < size;
	     offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
		p_tlv = &p_mfw_buf[offset];
		tlv.tlv_type = TLV_TYPE(p_tlv);
		tlv.tlv_length = TLV_LENGTH(p_tlv);
		tlv.tlv_flags = TLV_FLAGS(p_tlv);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
			   tlv.tlv_length, tlv.tlv_flags);

		if (tlv_group == QED_MFW_TLV_GENERIC)
			len = qed_mfw_get_gen_tlv_value(&tlv,
							&p_tlv_data->generic,
							&buffer);
		else if (tlv_group == QED_MFW_TLV_ETH)
			len = qed_mfw_get_eth_tlv_value(&tlv,
							&p_tlv_data->eth,
							&buffer);

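		/* A positive length means the driver supplied a value;
		 * clamp it to the space the TLV provides, flag the TLV as
		 * changed, and copy the value in after the header.
		 */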
		if (len > 0) {
			WARN(len > 4 * tlv.tlv_length,
			     "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
			     len, 4 * tlv.tlv_length);
			len = min_t(int, len, 4 * tlv.tlv_length);
			tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED;
			TLV_FLAGS(p_tlv) = tlv.tlv_flags;
			memcpy(p_mfw_buf + offset + sizeof(tlv),
			       buffer.p_val, len);
		}
	}

	vfree(p_tlv_data);

	return 0;
}

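/* Service a TLV request from the MFW: read the request image from the
 * 'global' shared-memory section, fill in the values the driver can supply,
 * write the image back, and acknowledge with DRV_MSG_CODE_GET_TLV_DONE.
 * The acknowledgment is sent even if the request could not be served, so
 * the MFW does not stay blocked waiting for a response.
 */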
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr, size, offset, resp, param, val, global_offsize, global_addr;
	u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp;
	struct qed_drv_tlv_hdr tlv;
	int rc;

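	/* Locate the TLV request buffer: the 'global' public section holds
	 * its address (data_ptr) and size (data_size).
	 */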
	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = qed_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	addr = global_addr + offsetof(struct public_global, data_ptr);
	addr = qed_rd(p_hwfn, p_ptt, addr);
	size = qed_rd(p_hwfn, p_ptt, global_addr +
		      offsetof(struct public_global, data_size));

	if (!size) {
		DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size);
		goto drv_done;
	}

	p_mfw_buf = vzalloc(size);
	if (!p_mfw_buf) {
		DP_NOTICE(p_hwfn, "Failed to allocate memory for p_mfw_buf\n");
		goto drv_done;
	}

	/* Read the TLV request into the local buffer. The MFW lays out the
	 * TLVs in little-endian format, but the MCP read returns each dword
	 * in big-endian format. Convert every dword back to CPU order first
	 * and then memcpy it, so the MFW TLV layout is preserved in the
	 * driver buffer.
	 */
	for (offset = 0; offset < size; offset += sizeof(u32)) {
		val = qed_rd(p_hwfn, p_ptt, addr + offset);
		val = be32_to_cpu(val);
		memcpy(&p_mfw_buf[offset], &val, sizeof(u32));
	}

	/* Parse the headers to enumerate the requested TLV groups */
	for (offset = 0; offset < size;
	     offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
		p_temp = &p_mfw_buf[offset];
		tlv.tlv_type = TLV_TYPE(p_temp);
		tlv.tlv_length = TLV_LENGTH(p_temp);
		if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
			DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
				   "Unrecognized TLV %d\n", tlv.tlv_type);
	}

	/* Sanitize the TLV groups according to personality */
	if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Skipping L2 TLVs for non-L2 function\n");
		tlv_group &= ~QED_MFW_TLV_ETH;
	}

	/* Update the TLV values in the local buffer; the group IDs are
	 * single-bit flags, so step through them by shifting.
	 */
	for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
		if (tlv_group & id)
			if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
				goto drv_done;
	}

	/* Write the TLV data back to shared memory. Each 4-byte chunk is
	 * first memcpy'd into a u32 in CPU order and then converted to big
	 * endian, as the MCP write expects.
	 */
	for (offset = 0; offset < size; offset += sizeof(u32)) {
		memcpy(&val, &p_mfw_buf[offset], sizeof(u32));
		val = cpu_to_be32(val);
		qed_wr(p_hwfn, p_ptt, addr + offset, val);
	}

drv_done:
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
			 &param);

	vfree(p_mfw_buf);

	return rc;
}