/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100

static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms, if we receive some other value
			 * from dcbnl, then throw an error.
			 */
			return -EINVAL;
		}
	}

	return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
}

static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (ets->prio_tc[i] >= hdev->tc_max ||
		    i >= hdev->tc_max)
			return -EINVAL;

		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			return -EINVAL;
		}
	}

	if (total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

static int hclge_map_update(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_schd_mode_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	return hclge_rss_init_hw(hdev);
}

static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		return ret;

	if (map_changed) {
		ret = hclge_client_setup_tc(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);
}

static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;

	/* Pfc setting is based on TC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);
		}
	}

	return 0;
}

static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	if (pfc_map == hdev->tm_info.hw_pfc_map)
		return 0;

	hdev->tm_info.hw_pfc_map = pfc_map;

	return hclge_pause_setup_hw(hdev);
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	if (tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"setup tc failed, tc(%u) > tc_max(%u)\n",
			tc, hdev->tc_max);
		return -EINVAL;
	}

	hclge_tm_schd_info_update(hdev, tc);

	ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
	if (ret)
		return ret;

	ret = hclge_tm_init_hw(hdev);
	if (ret)
		return ret;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return 0;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.map_update	= hclge_map_update,
	.setup_tc	= hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* Hdev does not support DCB or vport is
	 * not a pf, then dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}