// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

/* ETS bandwidth shares of all DWRR TCs must sum to this (percent). */
#define BW_PERCENT 100

/* Translate an IEEE 802.1Qaz ETS configuration from dcbnl into the
 * driver's internal traffic-manager state: per-TC scheduling mode
 * (strict priority vs. DWRR) and per-TC DWRR weight in pg_info[0],
 * then push the priority-to-TC map via hclge_tm_prio_tc_info_update().
 *
 * Returns 0 on success, -EINVAL for an unsupported TSA algorithm, or
 * the error code from hclge_tm_prio_tc_info_update().
 */
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			/* SP TCs carry no DWRR weight */
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms, if we receive some other value
			 * from dcbnl, then throw an error.
			 */
			return -EINVAL;
		}
	}

	return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
}

/* Fill @ets with the driver's current traffic-manager state: the
 * priority-to-TC map, DWRR weights, and per-TC TSA algorithm derived
 * from each TC's scheduling mode. Always reports willing = 1 and
 * ets_cap = tc_max.
 */
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */

/* dcbnl ieee_getets handler: report the current ETS configuration. */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

/* Sanity-check a requested ETS configuration before applying it.
 *
 * Verifies that every priority maps to a TC below tc_max, that only
 * TC indices below tc_max are configured, that each TC's TSA is one
 * of SP/ETS, and that the DWRR weights of all ETS TCs sum to exactly
 * BW_PERCENT (when any ETS TC exists).
 *
 * On success, *@tc holds the number of TCs implied by the highest
 * mapped TC, and *@changed is set to true if the requested config
 * differs from the currently-programmed one (it is never cleared
 * here, so the caller must initialize it).
 *
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (ets->prio_tc[i] >= hdev->tc_max ||
		    i >= hdev->tc_max)
			return -EINVAL;

		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	/* all ETS bandwidth must be accounted for, exactly */
	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

/* Re-program hardware after a TC map change: queue-to-TC mapping,
 * scheduler mode, pause/PFC setup, packet buffer allocation, then
 * re-initialize RSS for the new queue layout. Each step's error is
 * propagated immediately; no rollback of earlier steps is attempted.
 */
static int hclge_map_update(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_schd_mode_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}

/* Notify every attached client (PF vport plus VMDq vports) of the new
 * TC count through its setup_tc callback; vports without a registered
 * client or callback are skipped. Stops at the first client error.
 */
static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}

/* dcbnl ieee_setets handler: validate the request, update scheduler
 * info and internal TM state, notify clients if the prio/TC map
 * changed, and finally program the DWRR weights. Rejected with
 * -EINVAL while mqprio offload is active or when IEEE DCBX mode is
 * not enabled.
 */
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	ret = hclge_tm_schd_info_update(hdev, num_tc);
	if (ret)
		return ret;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		return ret;

	if (map_changed) {
		ret = hclge_client_setup_tc(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);
}

/* dcbnl ieee_getpfc handler: report PFC capability, the per-priority
 * enable bitmap derived from the per-TC hw_pfc_map (a priority is
 * enabled iff its TC has PFC on), and per-priority PFC frame
 * statistics (TX requests / RX indications) read from hardware.
 *
 * NOTE(review): requests[]/indications[] are sized HNAE3_MAX_TC but
 * the copy loop runs to HCLGE_MAX_TC_NUM — assumes the two constants
 * are equal; confirm against the headers.
 */
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;

	/* Pfc setting is based on TC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);
		}
	}

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}
	return 0;
}

/* dcbnl ieee_setpfc handler: fold the per-priority enable bits into a
 * per-TC PFC bitmap (a TC gets PFC if any of its priorities request
 * it). A no-op if the resulting map matches what is already in
 * hardware; otherwise the pause configuration is re-programmed.
 * Rejected while mqprio offload is active or IEEE DCBX is disabled.
 */
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	if (pfc_map == hdev->tm_info.hw_pfc_map)
		return 0;

	hdev->tm_info.hw_pfc_map = pfc_map;

	return hclge_pause_setup_hw(hdev);
}

/* DCBX configuration */

/* dcbnl getdcbx handler: report the DCBX capability bits, or 0 while
 * mqprio offload is active (DCBX is unavailable then).
 */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

/* dcbnl setdcbx handler: accept only host-managed, non-CEE modes.
 * Returns 1 (dcbnl's failure convention) for unsupported mode
 * combinations, 0 on success.
 */
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mqprio and DCB are mutually exclusive */
	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	if (tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"setup tc failed, tc(%u) > tc_max(%u)\n",
			tc, hdev->tc_max);
		return -EINVAL;
	}

	ret = hclge_tm_schd_info_update(hdev, tc);
	if (ret)
		return ret;

	ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
	if (ret)
		return ret;

	ret = hclge_tm_init_hw(hdev);
	if (ret)
		return ret;

	/* NOTE(review): DCB_ENABLE is already known clear (guard above
	 * returned -EINVAL otherwise), so this clear is redundant —
	 * harmless, but worth confirming the intent.
	 */
	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return 0;
}
336 337 static const struct hnae3_dcb_ops hns3_dcb_ops = { 338 .ieee_getets = hclge_ieee_getets, 339 .ieee_setets = hclge_ieee_setets, 340 .ieee_getpfc = hclge_ieee_getpfc, 341 .ieee_setpfc = hclge_ieee_setpfc, 342 .getdcbx = hclge_getdcbx, 343 .setdcbx = hclge_setdcbx, 344 .map_update = hclge_map_update, 345 .setup_tc = hclge_setup_tc, 346 }; 347 348 void hclge_dcb_ops_set(struct hclge_dev *hdev) 349 { 350 struct hclge_vport *vport = hdev->vport; 351 struct hnae3_knic_private_info *kinfo; 352 353 /* Hdev does not support DCB or vport is 354 * not a pf, then dcb_ops is not set. 355 */ 356 if (!hnae3_dev_dcb_supported(hdev) || 357 vport->vport_id != 0) 358 return; 359 360 kinfo = &vport->nic.kinfo; 361 kinfo->dcb_ops = &hns3_dcb_ops; 362 hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST; 363 } 364