1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

/* Fixed-point shift used by the free-running cycle counter conversion. */
enum {
	MLX5_CYCLES_SHIFT	= 23
};

/* MTPPS pin_mode field values. */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};

/* MTPPS pattern field values for output pins. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};

/* MTPPSE event generation modes. */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};

/* field_select bits of the MTPPS register: which fields a write applies. */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD		= BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS	= BIT(0xa),
};

/* True when the device timestamps RQ and/or SQ completions in real-time
 * (UTC) format rather than free-running counter cycles.
 */
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}

/* N-pulses-per-second output requires real-time mode plus the npps_period
 * and out_pulse_duration_ns MCAM feature bits.
 */
static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_real_time_mode(mdev) &&
		MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
		MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
}

/* Whether firmware allows modifying the PTP-cycles-to-real-time mapping. */
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}

/* Write the MTUTC register (hardware UTC clock control).
 * Returns -EOPNOTSUPP when the register is not exposed by firmware,
 * otherwise the access_reg result.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}

/* Read the 64-bit device time (real-time or free-running) from the init
 * segment as two 32-bit MMIO reads.  The high word is read before and
 * after the low word; a mismatch means the low word wrapped between the
 * reads, so the low word is re-read under the new high word.
 * @sts: optional system-timestamp capture bracketing the low-word read.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}

/* cyclecounter read callback: sample the free-running internal timer. */
static u64 read_internal_timer(const struct cyclecounter *cc)
{
	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_read_time(mdev, NULL, false) & cc->mask;
}

/* Publish the current timecounter state to the page shared with userspace
 * (mlx5_ib).  The sign field implements a writer-side sequence count:
 * readers retry while it is odd (KERNEL_UPDATING set) or changed across
 * their read.  Caller must hold clock->lock.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult = timer->cycles.mult;
	clock_info->nsec = timer->tc.nsec;
	clock_info->frac = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}

/* Deferred work: for each pin with a queued start timestamp (set by the
 * PPS event handler), program the next output edge time via MTPPS and
 * clear the queued value.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Consume the queued timestamp atomically w.r.t. the
		 * PPS event handler that produces it.
		 */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}

/* Periodic work that reads the timecounter often enough to detect cycle
 * counter wrap-around, then re-arms itself with the precomputed period.
 */
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}

/* Set the hardware real-time (UTC) clock to @ts via MTUTC.
 * Returns 0 (and does nothing) when firmware does not allow modifying the
 * mapping, so callers fall through to updating the software timecounter.
 * NOTE(review): the range check accepts tv_nsec == NSEC_PER_SEC, which is
 * not a valid normalized timespec; '>= NSEC_PER_SEC' looks intended —
 * confirm against firmware spec.
 */
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
				      const struct timespec64 *ts)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	/* utc_sec is a 32-bit field in MTUTC. */
	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
		return -EINVAL;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

/* PTP .settime64 callback: set the hardware UTC clock (when supported)
 * and reinitialize the software timecounter to the same instant.
 */
static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_settime_real_time(mdev, ts);
	if (err)
		return err;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

/* Read the hardware real-time clock and return it as a timespec64. */
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
					      struct ptp_system_timestamp *sts)
{
	struct timespec64 ts;
	u64 time;

	time = mlx5_read_time(mdev, sts, true);
	ts = ns_to_timespec64(time);
	return ts;
}

/* PTP .gettimex64 callback: real-time clock when in real-time mode,
 * otherwise free-running cycles converted through the timecounter.
 */
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 cycles, ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	if (mlx5_real_time_mode(mdev)) {
		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
		goto out;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	cycles = mlx5_read_time(mdev, sts, false);
	ns = timecounter_cyc2time(&timer->tc, cycles);
	write_sequnlock_irqrestore(&clock->lock, flags);
	*ts = ns_to_timespec64(ns);
out:
	return 0;
}

/* Apply a nanosecond offset to the hardware real-time clock.
 * Returns 0 (no-op) when MTUTC modification is unsupported.
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	/* HW time adjustment range is s16. If out of range, settime instead */
	if (delta < S16_MIN || delta > S16_MAX) {
		struct timespec64 ts;
		s64 ns;

		/* read-modify-write: not atomic w.r.t. the clock ticking,
		 * but the only option for large steps.
		 */
		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

/* PTP .adjtime callback: offset both the hardware real-time clock (when
 * supported) and the software timecounter by @delta nanoseconds.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_adjtime_real_time(mdev, delta);
	if (err)
		return err;
	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

/* Apply a frequency adjustment (ppb, as passed by the PTP core) to the
 * hardware real-time clock.  No-op when MTUTC modification is unsupported.
 */
static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
	MLX5_SET(mtutc_reg, in, freq_adjustment, freq);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

/* PTP .adjfreq callback: adjust the hardware clock, then scale the
 * software cycle-counter multiplier by delta ppb around its nominal value.
 */
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int neg_adj = 0;
	u32 diff;
	u64 adj;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_adjfreq_real_time(mdev, delta);
	if (err)
		return err;

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	/* diff = nominal_c_mult * |delta| / 1e9 (parts-per-billion). */
	adj = timer->nominal_c_mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_seqlock_irqsave(&clock->lock, flags);
	/* Fold elapsed time at the old rate before changing mult. */
	timecounter_read(&timer->tc);
	timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
				       timer->nominal_c_mult + diff;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

/* PTP .enable handler for external timestamp (EXTTS) requests: validate
 * flags and pin, then program MTPPS/MTPPSE for edge capture on the pin.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern: 0 = rising edge, 1 = falling edge. */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

/* Convert a target time in ns (timecounter domain) into the absolute
 * free-running cycle value at which it will occur.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* Invert the mult/shift conversion: cycles = ns << shift / mult. */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}

/* Internal-timer start value for a periodic output beginning at @sec. */
static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
{
	struct timespec64 ts = {};
	s64 target_ns;

	ts.tv_sec = sec;
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

/* Pack sec/nsec into the 64-bit real-time format (sec in high 32 bits). */
static u64 perout_conf_real_time(s64 sec, u32 nsec)
{
	return (u64)nsec | (u64)sec << 32;
}

/* Validate a 1-pulse-per-second periodic request (period must be exactly
 * one second) and compute the start timestamp in the appropriate domain.
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	/* Only an exact 1 s period is supported (1e9 ns >> 1 == 5e8). */
	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}

#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
/* Compute the output pulse duration in ns: the requested duty cycle when
 * given, otherwise half the period; range-checked against device limits.
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}

/* Configure an N-pulses-per-second periodic output in real-time mode:
 * validates period against the device minimum, fills period, pulse
 * duration, start timestamp and the extra field_select bits.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}

/* Returns true when the perout flags are NOT acceptable: any flag without
 * NPPS support, or any flag other than DUTY_CYCLE with NPPS support.
 */
static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
{
	return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
		(mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
}

/* PTP .enable handler for periodic output (PEROUT) requests: validate the
 * request, build the MTPPS write (NPPS or 1PPS path), and arm MTPPSE when
 * running on the free-running clock.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	bool rt_mode = mlx5_real_time_mode(mdev);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 out_pulse_duration_ns = 0;
	u32 field_select = 0;
	u64 npps_period = 0;
	u64 time_stamp = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	field_select = MLX5_MTPPS_FS_ENABLE;
	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		/* NOTE(review): shadows the outer rt_mode with the same
		 * value; harmless but worth cleaning up.
		 */
		bool rt_mode = mlx5_real_time_mode(mdev);

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;

		/* Real-time timestamp format holds seconds in 32 bits. */
		if (rt_mode && rq->perout.start.sec > U32_MAX)
			return -EINVAL;

		field_select |= MLX5_MTPPS_FS_PIN_MODE |
				MLX5_MTPPS_FS_PATTERN |
				MLX5_MTPPS_FS_TIME_STAMP;

		if (mlx5_npps_real_time_supported(mdev))
			err = perout_conf_npps_real_time(mdev, rq, &field_select,
							 &out_pulse_duration_ns, &npps_period,
							 &time_stamp);
		else
			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
		if (err)
			return err;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);
	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* In real-time mode hardware self-times the output; no events. */
	if (rt_mode)
		return 0;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

/* PTP .enable handler for PPS user-signal requests: just record the state;
 * the event handler routes captures to PPSUSR when enabled.
 */
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq,
			      int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);

	clock->pps_info.enabled = !!on;
	return 0;
}

/* PTP .enable dispatch by request type. */
static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq,
			   int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return mlx5_extts_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return mlx5_perout_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return mlx5_pps_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Per-pin capability bits reported by MTPPS cap_pin_X_mode. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};

/* PTP .verify callback: check a pin supports the requested function. */
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}

/* Template ptp_clock_info; pin counts and enable/verify are filled in at
 * init when the device exposes PPS capabilities.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};

/* Query MTPPS state for a single pin. */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);

	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}

/* Translate a pin's current firmware state into a ptp_pin_function, so a
 * configuration that survived a driver reload is reflected to userspace.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);

	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
	u8 mode;
	int err;

	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
	if (err || !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	mode = MLX5_GET(mtpps_reg, out, pin_mode);

	if (mode == MLX5_PIN_MODE_IN)
		return PTP_PF_EXTTS;
	else if (mode == MLX5_PIN_MODE_OUT)
		return PTP_PF_PEROUT;

	return PTP_PF_NONE;
}

/* Allocate and populate the pin_config table and hook up the enable/verify
 * callbacks.  On allocation failure the clock silently stays pin-less.
 */
static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	if (!clock->ptp_info.n_pins)
		return;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}
}

/* Read MTPPS capabilities: pin counts, minimum NPPS period / pulse
 * duration (when the MCAM features exist), and per-pin mode caps.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_out_pulse_duration_ns);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

/* Round @ts up to the start of the next whole second. */
static void ts_next_sec(struct timespec64 *ts)
{
	ts->tv_sec += 1;
	ts->tv_nsec = 0;
}

/* Cycle value of the next second boundary, for re-arming a 1PPS output. */
static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
					struct mlx5_clock *clock)
{
	struct timespec64 ts;
	s64 target_ns;

	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
	ts_next_sec(&ts);
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

/* Notifier for firmware PPS events.  EXTTS captures are forwarded to the
 * PTP core (as PPSUSR when PPS is enabled); PEROUT events queue the next
 * output edge for mlx5_pps_out to program.
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}

/* Set up the cyclecounter/timecounter over the device's free-running
 * timer, seeded with the current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = MLX5_CYCLES_SHIFT;
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	timer->nominal_c_mult = timer->cycles.mult;
	/* The device's internal timer is 41 bits wide. */
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}

/* Compute and kick off the overflow-watchdog period (in jiffies). */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert nanoseconds to jiffies. */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}

/* Allocate and seed the page that exposes clock parameters to userspace
 * (consumed by the mlx5_ib RDMA driver).  Failure is non-fatal.
 */
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_ib_clock_info *info;
	struct mlx5_timer *timer;

	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (!mdev->clock_info) {
		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
		return;
	}

	info = mdev->clock_info;
	timer = &clock->timer;

	info->nsec = timer->tc.nsec;
	info->cycles = timer->tc.cycle_last;
	info->mask = timer->cycles.mask;
	info->mult = timer->nominal_c_mult;
	info->shift = timer->cycles.shift;
	info->frac = timer->tc.frac;
}

/* Initialize the timer path: timecounter, shared info page, overflow
 * watchdog, and (in real-time mode) sync the HW clock to system time.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);
	/* NOTE(review): also assigned again in mlx5_init_clock(); the
	 * duplicate copy is harmless but redundant.
	 */
	clock->ptp_info = mlx5_ptp_clock_info;

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}

/* Discover PPS capabilities and build pin configuration, if supported. */
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_PPS_CAP(mdev))
		return;

	mlx5_get_pps_caps(mdev);
	mlx5_init_pin_config(clock);
}

/* Top-level HW clock init: set up the timer, register the PHC with the
 * PTP core (failure is tolerated; clock->ptp stays NULL), and subscribe
 * to firmware PPS events.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

/* Tear down in reverse order: notifier, PHC, pending work, shared page,
 * pin table.  No-op if init aborted for lack of a device frequency.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}