// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

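/* Return the option that follows @option, or NULL if advancing past it
 * would run beyond the total_length advertised by @descriptor.
 */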
static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

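/* Check one device option against the length and required-features mask
 * the driver expects for that option id and, on a match, point the
 * corresponding dev_op_* output argument at the option's payload (the
 * bytes immediately after the option header).
 */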
static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	default:
		/* If we don't recognize the option, just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames,
					dev_op_dqo_qpl);
		dev_opt = next_opt;
	}

	return 0;
}

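/* Allocate a single DMA-coherent page for the admin queue, zero the
 * per-command statistics counters, and hand the queue's page frame
 * number to the device through the adminq_pfn register.
 */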
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
					  &priv->adminq_bus_addr, GFP_KERNEL);
	if (unlikely(!priv->adminq))
		return -ENOMEM;

	priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Setup Admin queue with the device */
	iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
		    &priv->reg_bar0->adminq_pfn);

	gve_set_admin_queue_ok(priv);
	return 0;
}

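/* Ask the device to release the admin queue by clearing adminq_pfn,
 * then poll until the device acknowledges by reading the register back
 * as zero.
 */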
void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
	while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
		/* If this is reached the device is unrecoverable and still
		 * holding memory. Continue looping to avoid memory corruption,
		 * but WARN so the failure is visible.
		 */
		if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
			WARN(1, "Unrecoverable platform error!");
		i++;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
	gve_clear_admin_queue_ok(priv);
}

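/* The doorbell carries the producer count; gve_adminq_wait_for_cmd()
 * then polls the event counter register until it matches that count,
 * i.e. until the device has consumed every command we rang for.
 */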
static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

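/* Translate a device status code from a command's status field into a
 * standard errno, bumping the failure counter for anything that is
 * neither PASSED nor UNSET.
 */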
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// command queue so there should be enough space.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		// This is not a valid path
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX 0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
		.ntfy_blk_msix_base_idx =
			cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

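/* Queue a CREATE_TX_QUEUE command. For GQI the command carries a queue
 * page list id (or GVE_RAW_ADDRESSING_QPL_ID in RDA mode); for DQO it
 * additionally carries the completion ring address and size.
 */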
static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		u16 comp_ring_size;
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
			comp_ring_size =
				priv->options_dqo_rda.tx_comp_ring_entries;
		} else {
			qpl_id = tx->dqo.qpl->id;
			comp_ring_size = priv->tx_desc_cnt;
		}
		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_tx_queue.tx_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(comp_ring_size);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

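/* Queue a CREATE_RX_QUEUE command. GQI supplies the descriptor and data
 * rings directly; DQO instead supplies the completion queue and buffer
 * queue, the data buffer size, and whether RSC (LRO) is enabled.
 */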
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus);
		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		u16 rx_buff_ring_entries;
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
			rx_buff_ring_entries =
				priv->options_dqo_rda.rx_buff_ring_entries;
		} else {
			qpl_id = rx->dqo.qpl->id;
			rx_buff_ring_entries = priv->rx_desc_cnt;
		}
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.rx_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd.create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd.create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(rx_buff_ring_entries);
		cmd.create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

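/* Pull the GQI ring sizes out of the device descriptor, rejecting
 * counts so small that a ring would occupy less than a page.
 */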
static int gve_set_desc_cnt(struct gve_priv *priv,
			    struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
		dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
			priv->tx_desc_cnt);
		return -EINVAL;
	}
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
	    < PAGE_SIZE) {
		dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
			priv->rx_desc_cnt);
		return -EINVAL;
	}
	return 0;
}

static int
gve_set_desc_cnt_dqo(struct gve_priv *priv,
		     const struct gve_device_descriptor *descriptor,
		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);

	if (priv->queue_format == GVE_DQO_QPL_FORMAT)
		return 0;

	priv->options_dqo_rda.tx_comp_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
	priv->options_dqo_rda.rx_buff_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);

	return 0;
}

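/* Apply optional features advertised by the device: the jumbo-frames
 * option overrides max_mtu, and the DQO-QPL option overrides the number
 * of pages per QPL (falling back to the driver defaults when zero).
 */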
static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* Override pages for qpl for DQO-QPL */
	if (dev_op_dqo_qpl) {
		priv->tx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
		priv->rx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
		if (priv->tx_pages_per_qpl == 0)
			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
		if (priv->rx_pages_per_qpl == 0)
			priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
	}
}

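/* Issue DESCRIBE_DEVICE and digest the result: pick a queue format from
 * the device options, size the rings, and record the MTU, MAC address
 * and queue-page-list limits.
 */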
int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
					&descriptor_bus, GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
		cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
		cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames,
					 &dev_op_dqo_qpl);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_dqo_qpl) {
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}
	if (gve_is_gqi(priv)) {
		err = gve_set_desc_cnt(priv, descriptor);
	} else {
		/* DQO supports LRO. */
		priv->dev->hw_features |= NETIF_F_LRO;
		err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
	}
	if (err)
		goto free_device_descriptor;

	priv->max_registered_pages =
		be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);

	if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
			priv->rx_data_slot_cnt);
		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
	}
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames, dev_op_dqo_qpl);

free_device_descriptor:
	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
			  descriptor_bus);
	return err;
}

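/* Register a queue page list with the device. The page bus addresses
 * are staged in a temporary DMA-coherent array of big-endian values
 * that is freed once the command has executed.
 */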
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

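/* Ask the device to write the current link speed into a small
 * DMA-coherent region, then cache the big-endian result in
 * priv->link_speed.
 */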
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

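/* Fetch the DQO packet-type map from the device and copy the L3/L4
 * types for each of the GVE_NUM_PTYPES entries into @ptype_lut.
 */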
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}