// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/vmalloc.h>

#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"

/* context for devlink info version reporting */
struct ice_info_ctx {
	char buf[128];
	struct ice_orom_info pending_orom;
	struct ice_nvm_info pending_nvm;
	struct ice_netlist_info pending_netlist;
	struct ice_hw_dev_caps dev_caps;
};

/* The following functions are used to format specific strings for various
 * devlink info versions. The ctx parameter is used to provide the storage
 * buffer, as well as any ancillary information calculated when the info
 * request was made.
 *
 * If a version does not exist, for example when attempting to get the
 * inactive version of flash when there is no pending update, the function
 * should leave the buffer in the ctx structure empty.
 */

static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u8 dsn[8];

	/* Copy the DSN into an array in Big Endian format */
	put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

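	/* "%8phD" is the printk hex-dump extension: it prints the 8 bytes
	 * as hex pairs separated by dashes (e.g. "00-01-02-03-04-05-06-07"),
	 * the usual presentation of a PCI device serial number.
	 */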
	snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}

static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;
	int status;

	status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
	if (status)
		/* We failed to locate the PBA, so just skip this entry */
		dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
			status);
}

static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}

static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
		 hw->api_min_ver, hw->api_patch);
}

static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
}

static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &pf->hw.flash.orom;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 orom->major, orom->build, orom->patch);
}

static void
ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
			  struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &ctx->pending_orom;

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
		snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
			 orom->major, orom->build, orom->patch);
}

static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
}

static void
ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
			 struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
			 nvm->major, nvm->minor);
}

static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
}

static void
ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
		 pkg->major, pkg->minor, pkg->update, pkg->draft);
}

static void
ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
}

static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	/* The netlist version fields are BCD formatted */
	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
		 netlist->major, netlist->minor,
		 netlist->type >> 16, netlist->type & 0xFFFF,
		 netlist->rev, netlist->cust_ver);
}

static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
			     struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	/* The netlist version fields are BCD formatted */
	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
			 netlist->major, netlist->minor,
			 netlist->type >> 16, netlist->type & 0xFFFF,
			 netlist->rev, netlist->cust_ver);
}

static void
ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
			       struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }

/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler. The stored entry will first try the pending handler, and fall
 * back to the active handler if the pending function does not report a
 * version. The pending handler should check the status of a pending update
 * for the relevant flash component. It should only fill in the buffer when
 * a valid pending version is available. This ensures that the related
 * stored and running versions remain in sync, and that stored versions are
 * correctly reported.
 */
#define combined(key, active, pending) \
	running(key, active), \
	stored(key, pending, active)
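
/* For example (illustrative expansion only), the entry
 *
 *   combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver)
 *
 * produces the two initializers
 *
 *   { ICE_VERSION_RUNNING, "fw.netlist", ice_info_netlist_ver, NULL },
 *   { ICE_VERSION_STORED, "fw.netlist", ice_info_pending_netlist_ver,
 *     ice_info_netlist_ver },
 *
 * so a pending netlist update is reported as the stored version while the
 * active netlist remains the running version.
 */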

enum ice_version_type {
	ICE_VERSION_FIXED,
	ICE_VERSION_RUNNING,
	ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
	enum ice_version_type type;
	const char *key;
	void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
	void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
	fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
	running("fw.mgmt.api", ice_info_fw_api),
	running("fw.mgmt.build", ice_info_fw_build),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
	running("fw.app.name", ice_info_ddp_pkg_name),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
	running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
	combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
	combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
};
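
/* A minimal sketch of the resulting output (bus address and version strings
 * are illustrative; actual values depend on the installed flash and DDP):
 *
 *   $ devlink dev info pci/0000:af:00.0
 *   pci/0000:af:00.0:
 *     driver ice
 *     serial_number 00-01-02-03-04-05-06-07
 *     versions:
 *         fixed:
 *           board.id K91258-000
 *         running:
 *           fw.mgmt 7.0.2
 *           fw.mgmt.api 1.7.9
 *           ...
 *         stored:
 *           fw.undi 1.3078.0
 *           ...
 */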

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_info_ctx *ctx;
	size_t i;
	int err;

	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
		return err;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* discover capabilities first */
	err = ice_discover_dev_caps(hw, &ctx->dev_caps);
	if (err) {
		dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
		goto out_free_ctx;
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
		err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Option ROM */
			ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
		err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
		if (err) {
			dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending NVM */
			ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
		err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Netlist */
			ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
		}
	}

	err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
		goto out_free_ctx;
	}

	ice_info_get_dsn(pf, ctx);

	err = devlink_info_serial_number_put(req, ctx->buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		goto out_free_ctx;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		memset(ctx->buf, 0, sizeof(ctx->buf));

		ice_devlink_versions[i].getter(pf, ctx);

		/* If the default getter doesn't report a version, use the
		 * fallback function. This is primarily useful in the case of
		 * "stored" versions that want to report the same value as the
		 * running version in the normal case of no pending update.
		 */
		if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
			ice_devlink_versions[i].fallback(pf, ctx);

		/* Do not report missing versions */
		if (ctx->buf[0] == '\0')
			continue;

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				goto out_free_ctx;
			}
			break;
		}
	}

out_free_ctx:
	kfree(ctx);
	return err;
}

/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 *
 * Allow user to activate new Embedded Management Processor firmware by
 * issuing device specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happens automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 */
static int
ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
			      enum devlink_reload_action action,
			      enum devlink_reload_limit limit,
			      struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;

	/* pending is a bitmask of which flash banks have a pending update,
	 * including the main NVM bank, the Option ROM bank, and the netlist
	 * bank. If any of these bits are set, then there is a pending update
	 * waiting to be activated.
	 */
	if (!pending) {
		NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
		return -ECANCELED;
	}

	if (pf->fw_emp_reset_disabled) {
		NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
		return -ECANCELED;
	}

	dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

	err = ice_aq_nvm_update_empr(hw);
	if (err) {
		dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
		return err;
	}

	return 0;
}

/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @devlink: pointer to the devlink instance reloading
 * @action: the action requested
 * @limit: limits imposed by userspace, such as not resetting
 * @actions_performed: on return, indicates which actions were actually performed
 * @extack: netlink extended ACK structure
 *
 * Wait for the driver to finish rebuilding after an EMP reset has completed.
 * This includes the time for the actual device reset as well as the time for
 * the driver's rebuild to complete.
 */
static int
ice_devlink_reload_empr_finish(struct devlink *devlink,
			       enum devlink_reload_action action,
			       enum devlink_reload_limit limit,
			       u32 *actions_performed,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	int err;

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);

	err = ice_wait_for_reset(pf, 60 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
		return err;
	}

	return 0;
}
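
/* Firmware activation is driven from userspace through devlink reload, e.g.
 * (bus address illustrative):
 *
 *   $ devlink dev reload pci/0000:af:00.0 action fw_activate
 *
 * which invokes ice_devlink_reload_empr_start() and then
 * ice_devlink_reload_empr_finish(), which waits for the reset and the driver
 * rebuild to complete.
 */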

static const struct devlink_ops ice_devlink_ops = {
	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	/* The ice driver currently does not support driver reinit */
	.reload_down = ice_devlink_reload_empr_start,
	.reload_up = ice_devlink_reload_empr_finish,
	.eswitch_mode_get = ice_eswitch_mode_get,
	.eswitch_mode_set = ice_eswitch_mode_set,
	.info_get = ice_devlink_info_get,
	.flash_update = ice_devlink_flash_update,
};

static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;

	return 0;
}

static int
ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool roce_ena = ctx->val.vbool;
	int ret;

	if (!roce_ena) {
		ice_unplug_aux_dev(pf);
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
		return 0;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;

	return ret;
}

static int
ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
		NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;

	return 0;
}

static int
ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool iw_ena = ctx->val.vbool;
	int ret;

	if (!iw_ena) {
		ice_unplug_aux_dev(pf);
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
		return 0;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;

	return ret;
}

static int
ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
			       union devlink_param_value val,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
		NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct devlink_param ice_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_roce_get,
			      ice_devlink_enable_roce_set,
			      ice_devlink_enable_roce_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_iw_get,
			      ice_devlink_enable_iw_set,
			      ice_devlink_enable_iw_validate),
};
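
/* These generic parameters are toggled at runtime through devlink, e.g.
 * (bus address illustrative):
 *
 *   $ devlink dev param set pci/0000:af:00.0 name enable_roce \
 *             value true cmode runtime
 *   $ devlink dev param show pci/0000:af:00.0 name enable_iwarp
 *
 * The validate callbacks reject enabling both RDMA protocols at once.
 */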

static void ice_devlink_free(void *devlink_ptr)
{
	devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is tracked through devres by adding an
 * action to remove it when unwinding.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
	if (!devlink)
		return NULL;

	/* Add an action to tear down the devlink when unwinding the driver */
	if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
		return NULL;

	return devlink_priv(devlink);
}

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for
 *
 * Register the devlink instance associated with this physical function.
 */
void ice_devlink_register(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF
 * @pf: the PF structure to clean up
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
	devlink_unregister(priv_to_devlink(pf));
}

/**
 * ice_devlink_set_switch_id - Set unique switch ID based on the PCI DSN
 * @pf: the PF to create a devlink port for
 * @ppid: struct with switch id information
 */
static void
ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
{
	struct pci_dev *pdev = pf->pdev;
	u64 id;

	id = pci_get_dsn(pdev);

	ppid->id_len = sizeof(id);
	put_unaligned_be64(id, &ppid->id);
}

int ice_devlink_register_params(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, ice_devlink_params,
				      ARRAY_SIZE(ice_devlink_params));
	if (err)
		return err;

	value.vbool = false;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
					   value);

	value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					   value);

	return 0;
}

void ice_devlink_unregister_params(struct ice_pf *pf)
{
	devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
				  ARRAY_SIZE(ice_devlink_params));
}

/**
 * ice_devlink_create_pf_port - Create a devlink port for this PF
 * @pf: the PF to create a devlink port for
 *
 * Create and register a devlink_port for this PF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_pf_port(struct ice_pf *pf)
{
	struct devlink_port_attrs attrs = {};
	struct devlink_port *devlink_port;
	struct devlink *devlink;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	devlink_port = &pf->devlink_port;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EIO;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = pf->hw.bus.func;

	ice_devlink_set_switch_id(pf, &attrs.switch_id);

	devlink_port_attrs_set(devlink_port, &attrs);
	devlink = priv_to_devlink(pf);

	err = devlink_port_register(devlink, devlink_port, vsi->idx);
	if (err) {
		dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
			pf->hw.pf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
 * @pf: the PF to clean up
 *
 * Unregisters the devlink_port structure associated with this PF.
 */
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
	struct devlink_port *devlink_port;

	devlink_port = &pf->devlink_port;

	devlink_port_type_clear(devlink_port);
	devlink_port_unregister(devlink_port);
}

/**
 * ice_devlink_create_vf_port - Create a devlink port for this VF
 * @vf: the VF to create a port for
 *
 * Create and register a devlink_port for this VF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_vf_port(struct ice_vf *vf)
{
	struct devlink_port_attrs attrs = {};
	struct devlink_port *devlink_port;
	struct devlink *devlink;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_pf *pf;
	int err;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	devlink_port = &vf->devlink_port;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return -EINVAL;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
	attrs.pci_vf.pf = pf->hw.bus.func;
	attrs.pci_vf.vf = vf->vf_id;

	ice_devlink_set_switch_id(pf, &attrs.switch_id);

	devlink_port_attrs_set(devlink_port, &attrs);
	devlink = priv_to_devlink(pf);

	err = devlink_port_register(devlink, devlink_port, vsi->idx);
	if (err) {
		dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
 * @vf: the VF to clean up
 *
 * Unregisters the devlink_port structure associated with this VF.
 */
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
	struct devlink_port *devlink_port;

	devlink_port = &vf->devlink_port;

	devlink_port_type_clear(devlink_port);
	devlink_port_unregister(devlink_port);
}

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the nvm-flash devlink region. It captures a snapshot of the full NVM flash
 * contents, including both banks of flash. This snapshot can later be viewed
 * via the devlink-region interface.
 *
 * It captures the flash using the FLASH_ONLY bit set when reading via
 * firmware, so it does not read the current Shadow RAM contents. For that,
 * use the shadow-ram region.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	void *nvm_data;
	u32 nvm_size;
	int status;

	nvm_size = hw->flash.flash_size;
	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		vfree(nvm_data);
		return status;
	}

	status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			nvm_size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		vfree(nvm_data);
		return status;
	}

	ice_release_nvm(hw);

	*data = nvm_data;

	return 0;
}

/**
 * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the shadow-ram devlink region. It captures a snapshot of the Shadow RAM
 * contents. This snapshot can later be viewed via the devlink-region
 * interface.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int
ice_devlink_sram_snapshot(struct devlink *devlink,
			  const struct devlink_region_ops __always_unused *ops,
			  struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 *sram_data;
	u32 sram_size;
	int err;

	sram_size = hw->flash.sr_words * 2u;
	sram_data = vzalloc(sram_size);
	if (!sram_data)
		return -ENOMEM;

	err = ice_acquire_nvm(hw, ICE_RES_READ);
	if (err) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			err, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		vfree(sram_data);
		return err;
	}

	/* Read from the Shadow RAM, rather than directly from NVM */
	err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true);
	if (err) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			sram_size, err, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to read Shadow RAM contents");
		ice_release_nvm(hw);
		vfree(sram_data);
		return err;
	}

	ice_release_nvm(hw);

	*data = sram_data;

	return 0;
}

/**
 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the device-caps devlink region. It captures a snapshot of the device
 * capabilities reported by firmware.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
			     const struct devlink_region_ops *ops,
			     struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	void *devcaps;
	int status;

	devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
	if (!devcaps)
		return -ENOMEM;

	status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (status) {
		dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
		vfree(devcaps);
		return status;
	}

	*data = (u8 *)devcaps;

	return 0;
}

static const struct devlink_region_ops ice_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
};

static const struct devlink_region_ops ice_sram_region_ops = {
	.name = "shadow-ram",
	.destructor = vfree,
	.snapshot = ice_devlink_sram_snapshot,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = vfree,
	.snapshot = ice_devlink_devcaps_snapshot,
};
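
/* Snapshots of these regions are requested and read from userspace via the
 * devlink-region interface, e.g. (bus address and snapshot id illustrative):
 *
 *   $ devlink region new pci/0000:af:00.0/nvm-flash snapshot_id 1
 *   $ devlink region dump pci/0000:af:00.0/nvm-flash snapshot 1
 *   $ devlink region del pci/0000:af:00.0/nvm-flash snapshot 1
 */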

/**
 * ice_devlink_init_regions - Initialize devlink regions
 * @pf: the PF device structure
 *
 * Create devlink regions used to enable access to dump the contents of the
 * flash memory on the device.
 */
void ice_devlink_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	u64 nvm_size, sram_size;

	nvm_size = pf->hw.flash.flash_size;
	pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
					       nvm_size);
	if (IS_ERR(pf->nvm_region)) {
		dev_err(dev, "failed to create NVM devlink region, err %ld\n",
			PTR_ERR(pf->nvm_region));
		pf->nvm_region = NULL;
	}

	sram_size = pf->hw.flash.sr_words * 2u;
	pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
						1, sram_size);
	if (IS_ERR(pf->sram_region)) {
		dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
			PTR_ERR(pf->sram_region));
		pf->sram_region = NULL;
	}

	pf->devcaps_region = devlink_region_create(devlink,
						   &ice_devcaps_region_ops, 10,
						   ICE_AQ_MAX_BUF_LEN);
	if (IS_ERR(pf->devcaps_region)) {
		dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(pf->devcaps_region));
		pf->devcaps_region = NULL;
	}
}

/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	if (pf->nvm_region)
		devlink_region_destroy(pf->nvm_region);

	if (pf->sram_region)
		devlink_region_destroy(pf->sram_region);

	if (pf->devcaps_region)
		devlink_region_destroy(pf->devcaps_region);
}