1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */
#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

/* 16 nano second time quanta to wait before making a Drift adjustment */
#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0
/* Nano seconds to add/subtract when making a Drift adjustment */
#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28
/* Add/subtract the Adjustment_Value when making a Drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
/* "Timestamp valid" bit in the host-buffer SEQID registers; writing it
 * back clears the buffer so the hardware can latch a new timestamp.
 */
#define QED_TIMESTAMP_MASK BIT(16)

/* Map the hwfn's MFW port number to the per-port PTP resource-lock ID.
 * Returns QED_RESC_LOCK_RESC_INVALID for any port outside 0-3.
 */
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
	switch (MFW_PORT(p_hwfn)) {
	case 0:
		return QED_RESC_LOCK_PTP_PORT0;
	case 1:
		return QED_RESC_LOCK_PTP_PORT1;
	case 2:
		return QED_RESC_LOCK_PTP_PORT2;
	case 3:
		return QED_RESC_LOCK_PTP_PORT3;
	default:
		return QED_RESC_LOCK_RESC_INVALID;
	}
}

/* Acquire the per-port PTP resource lock from the management FW (MFW) so
 * that only one PF per port configures PTP.
 *
 * Returns 0 when this PF holds the lock (or is entitled to it on an MFW
 * without locking support), -EBUSY when another PF owns the lock, or a
 * negative errno on other MFW failures.
 */
static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(&params, NULL, resource, true);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		/* MFW doesn't support resource locking, first PF on the port
		 * has lock ownership.
		 */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
			return 0;

		DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
		return -EBUSY;
	} else if (!rc && !params.b_granted) {
		/* MFW answered but granted the lock to a different PF */
		DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
		return -EBUSY;
	}

	return rc;
}

/* Release the per-port PTP resource lock acquired by qed_ptp_res_lock().
 * Mirrors the lock path's fallback: on an MFW without locking support the
 * first PF on the port is treated as the implicit owner.
 */
static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(NULL, &params, resource, true);

	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
	if (rc == -EINVAL) {
		/* MFW doesn't support locking, first PF has lock ownership */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
			rc = 0;
		} else {
			DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
			return -EINVAL;
		}
	} else if (rc) {
		DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
	}

	return rc;
}

/* Read Rx timestamp.
 *
 * Checks the host-buffer SEQID register for a valid latched timestamp,
 * assembles the 64-bit value from the MSB/LSB registers, and writes the
 * valid bit back to free the buffer for the next timestamp.
 * Returns 0 on success, -EINVAL when no valid timestamp is latched
 * (*timestamp is left as 0 in that case).
 */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Read Tx timestamp.
 *
 * Same scheme as the Rx path, but against the Tx buffer registers.  The
 * "no timestamp yet" case is only logged at debug verbosity because the
 * caller is expected to poll until a timestamp is latched.
 */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
			   "Invalid Tx timestamp, buf_seqid = %08x\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

/* Read Phy Hardware Clock.
 *
 * Returns the current 64-bit free-running counter value assembled from the
 * TSGEN sync-time MSB/LSB registers.  NOTE(review): LSB is read before MSB
 * here; presumably the LSB read latches a coherent 64-bit snapshot in
 * hardware — confirm against the NIG register specification.
 */
static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 temp = 0;

	temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
	*phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
	*phc_cycles <<= 32;
	*phc_cycles |= temp;

	return 0;
}

/* Filter PTP protocol packets that need to be timestamped.
 *
 * Programs a (rule_mask, enable_cfg) pair per requested Rx filter class
 * and either disables Tx timestamping or mirrors the Rx rules onto the Tx
 * side.  The magic values encode which NIG parser rules may latch a
 * timestamp; their exact bit semantics come from the NIG register
 * specification and are not derivable from this file.
 * Returns 0 on success, -EINVAL for an unknown rx_type.
 */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		/* Disable Tx timestamping entirely (all rules masked off) */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		/* Apply the same rule set selected for Rx to the Tx side */
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
 * FW/HW accepts the adjustment value in terms of 3 parameters:
 * Drift period - adjustment happens once in certain number of nano seconds.
 * Drift value - time is adjusted by a certain value, for example by 5 ns.
 * Drift direction - add or subtract the adjustment value.
 * The routine translates ppb into the adjustment triplet in an optimal manner.
 */
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
	s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 drift_ctr_cfg = 0, drift_state;
	int drift_dir = 1;

	/* Work with |ppb|; the sign is carried by the direction bit */
	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb > 1) {
		s64 best_dif = ppb, best_approx_dev = 1;

		/* Adjustment value is up to +/-7ns, find an optimal value in
		 * this range.  For each candidate value, compute the period
		 * (in 16ns quanta) that best approximates the requested rate
		 * and keep the (value, period) pair with the smallest
		 * relative error.
		 */
		for (val = 7; val > 0; val--) {
			period = div_s64(val * 1000000000, ppb);
			period -= 8;
			period >>= 4;
			if (period < 1)
				period = 1;
			if (period > 0xFFFFFFE)
				period = 0xFFFFFFE;

			/* Check both rounding ends for approximate error:
			 * approx_dev is the actual interval (ns) this period
			 * encodes; dif/dif2 are the absolute rate errors for
			 * period and period+1 respectively.
			 */
			approx_dev = period * 16 + 8;
			dif = ppb * approx_dev - val * 1000000000;
			dif2 = dif + 16 * ppb;

			if (dif < 0)
				dif = -dif;
			if (dif2 < 0)
				dif2 = -dif2;

			/* Determine which end gives better approximation
			 * (cross-multiplied to compare error ratios without
			 * division).
			 */
			if (dif * (approx_dev + 16) > dif2 * approx_dev) {
				period++;
				approx_dev += 16;
				dif = dif2;
			}

			/* Track best approximation found so far */
			if (best_dif * approx_dev > dif * best_approx_dev) {
				best_dif = dif;
				best_val = val;
				best_period = period;
				best_approx_dev = approx_dev;
			}
		}
	} else if (ppb == 1) {
		/* This is a special case as it's the only value which wouldn't
		 * fit in an s64 variable. In order to prevent castings simply
		 * handle it separately.
		 */
		best_val = 4;
		best_period = 0xee6b27f;
	} else {
		/* ppb == 0: no adjustment - zero value, maximum period */
		best_val = 0;
		best_period = 0xFFFFFFF;
	}

	drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
			(((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
			(((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);

	/* Hold the drift counter in reset while reprogramming it; the
	 * read-back confirms the reset actually took effect.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);

	drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
	if (drift_state & 1) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
		       drift_ctr_cfg);
	} else {
		DP_INFO(p_hwfn, "Drift counter is not reset\n");
		return -EINVAL;
	}

	/* Release the drift counter from reset */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	return 0;
}

/* Enable PTP on this PF: acquire a dedicated PTT (held until disable),
 * take the per-port resource lock, and bring the timestamping hardware to
 * a clean initial state (rules reset, free-running counter zeroed, drift
 * counter disabled, stale timestamps cleared).
 * Returns 0 on success, -EBUSY or the resource-lock errno on failure.
 */
static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
		return -EBUSY;
	}

	/* Stashed on the hwfn for use by the other ptp ops until disable */
	p_hwfn->p_ptp_ptt = p_ptt;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc) {
		DP_INFO(p_hwfn,
			"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
		qed_ptt_release(p_hwfn, p_ptt);
		p_hwfn->p_ptp_ptt = NULL;
		return rc;
	}

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

	/* Pause free running counter (register differs between BB and AH
	 * chip families) before zeroing it.
	 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
	if (QED_IS_AH(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
	/* Resume free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
	}

	/* Disable drift register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

/* Disable PTP on this PF: release the per-port resource lock, reset the
 * detection rules, turn off Rx/Tx timestamping, and return the PTT that
 * qed_ptp_hw_enable() acquired.
 */
static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

	qed_ptp_res_unlock(p_hwfn, p_ptt);

	/* Reset PTP event detection rules */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable the PTP feature */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_ptp_ptt = NULL;

	return 0;
}

/* PTP operations exported to the qede (L2) driver */
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
	.cfg_filters = qed_ptp_hw_cfg_filters,
	.read_rx_ts = qed_ptp_hw_read_rx_ts,
	.read_tx_ts = qed_ptp_hw_read_tx_ts,
	.read_cc = qed_ptp_hw_read_cc,
	.adjfreq = qed_ptp_hw_adjfreq,
	.disable = qed_ptp_hw_disable,
	.enable = qed_ptp_hw_enable,
};