/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx4/device.h>
#include <linux/clocksource.h>

#include "mlx4_en.h"

/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
 */
static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
{
	struct mlx4_en_dev *mdev =
		container_of(tc, struct mlx4_en_dev, cycles);
	struct mlx4_dev *dev = mdev->dev;

	/* Mask the raw hardware counter down to the timecounter's width
	 * (48 bits, per the mask set in mlx4_en_init_timestamp) so the
	 * cyclecounter arithmetic sees a consistent value.
	 */
	return mlx4_read_clock(dev) & tc->mask;
}

/**
 * mlx4_en_get_cqe_ts - extract the raw hardware timestamp from a CQE
 * @cqe: completion queue entry carrying the split timestamp fields
 *
 * The CQE stores the timestamp as a big-endian 16-bit low part and a
 * big-endian 32-bit high part. The "+ !lo" term bumps the high part by
 * one when the low part is zero — NOTE(review): presumably compensating
 * for a hardware carry condition between the two fields; confirm against
 * the device programming manual.
 *
 * Returns the assembled 48-bit cycle-counter value.
 **/
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
{
	u64 hi, lo;
	struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;

	lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
	hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;

	return hi | lo;
}

/**
 * mlx4_en_fill_hwtstamps - convert a raw timestamp into skb hwtstamps
 * @mdev: board private structure
 * @hwts: sk_buff shared timestamp structure to fill in
 * @timestamp: raw cycle-counter value (e.g. from mlx4_en_get_cqe_ts)
 *
 * Converts the cycle count to nanoseconds via the timecounter, retrying
 * the read if a concurrent writer updated the clock state under
 * clock_lock, and stores the result as a ktime in @hwts.
 **/
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp)
{
	unsigned int seq;
	u64 nsec;

	do {
		seq = read_seqbegin(&mdev->clock_lock);
		nsec = timecounter_cyc2time(&mdev->clock, timestamp);
	} while (read_seqretry(&mdev->clock_lock, seq));

	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = ns_to_ktime(nsec);
}

/**
 * mlx4_en_remove_timestamp - disable PTP device
 * @mdev: board private structure
 *
 * Stop the PTP support. Safe to call when no PHC was registered
 * (ptp_clock is NULL); clears the pointer to prevent double unregister.
 **/
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
{
	if (mdev->ptp_clock) {
		ptp_clock_unregister(mdev->ptp_clock);
		mdev->ptp_clock = NULL;
		mlx4_info(mdev, "removed PHC\n");
	}
}

#define MLX4_EN_WRAP_AROUND_SEC	10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
 * good chance we won't miss a wrap around.
 * TODO: Use a timer instead of a work queue to increase the guarantee.
 */
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)

/**
 * mlx4_en_ptp_overflow_check - keep the timecounter ahead of counter wrap
 * @mdev: board private structure
 *
 * If at least MLX4_EN_OVERFLOW_PERIOD jiffies have passed since the last
 * check, read the timecounter under the clock seqlock. The read folds
 * elapsed cycles into the nanosecond base so the 48-bit hardware counter
 * cannot silently wrap between observations.
 **/
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
					      MLX4_EN_OVERFLOW_PERIOD);
	unsigned long flags;

	if (timeout) {
		write_seqlock_irqsave(&mdev->clock_lock, flags);
		timecounter_read(&mdev->clock);
		write_sequnlock_irqrestore(&mdev->clock_lock, flags);
		mdev->last_overflow_check = jiffies;
	}
}

/**
 * mlx4_en_phc_adjfine - adjust the frequency of the hardware clock
 * @ptp: ptp clock structure
 * @scaled_ppm: Desired frequency change in scaled parts per million
 *
 * Adjust the frequency of the PHC cycle counter by the indicated scaled_ppm
 * from the base frequency.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 **/
static int mlx4_en_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	u32 mult;
	unsigned long flags;
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);

	/* Always scale from the nominal multiplier (not the current one)
	 * so successive adjustments do not compound.
	 */
	mult = (u32)adjust_by_scaled_ppm(mdev->nominal_c_mult, scaled_ppm);

	/* Fold already-elapsed cycles in at the old rate before switching
	 * the multiplier, so only future cycles use the new frequency.
	 */
	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_read(&mdev->clock);
	mdev->cycles.mult = mult;
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_adjtime - Shift the time of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired change in nanoseconds
 *
 * Adjust the timer by resetting the timecounter structure.
 **/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_adjtime(&mdev->clock, delta);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_gettime - Reads the current time from the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * Read the timecounter and return the correct value in ns after converting
 * it into a struct timespec.
 **/
static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;
	u64 ns;

	/* Write (not read) side of the seqlock: timecounter_read() updates
	 * the timecounter's internal state, so it must be serialized against
	 * other writers.
	 */
	write_seqlock_irqsave(&mdev->clock_lock, flags);
	ns = timecounter_read(&mdev->clock);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * mlx4_en_phc_settime - Set the current time on the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec containing the new time for the cycle counter
 *
 * Reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 **/
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	/* reset the timecounter */
	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles, ns);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_enable - enable or disable an ancillary feature
 * @ptp: ptp clock structure
 * @request: Desired resource to enable or disable
 * @on: Caller passes one to enable or zero to disable
 *
 * Enable (or disable) ancillary features of the PHC subsystem.
 * Currently, no ancillary features are supported.
 **/
static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
			      struct ptp_clock_request __always_unused *request,
			      int __always_unused on)
{
	return -EOPNOTSUPP;
}

/* PHC operations template. mlx4_en_init_timestamp() copies this into the
 * per-device ptp_clock_info and fills in the name field before registering.
 */
static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
	.owner = THIS_MODULE,
	.max_adj = 100000000,
	.n_alarm = 0,
	.n_ext_ts = 0,
	.n_per_out = 0,
	.n_pins = 0,
	.pps = 0,
	.adjfine = mlx4_en_phc_adjfine,
	.adjtime = mlx4_en_phc_adjtime,
	.gettime64 = mlx4_en_phc_gettime,
	.settime64 = mlx4_en_phc_settime,
	.enable = mlx4_en_phc_enable,
};


/* This function calculates the max shift that enables the user range
 * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
 */
static u32 freq_to_shift(u16 freq)
{
	u32 freq_khz = freq * 1000;
	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
	/* Round the wrap-around cycle count up to a power of two */
	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
	/* calculate max possible multiplier in order to fit in 64bit */
	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);

	/* This comes from the reverse of clocksource_khz2mult */
	return ilog2(div_u64(max_mul * freq_khz, 1000000));
}

/**
 * mlx4_en_init_timestamp - set up the cycle counter and register the PHC
 * @mdev: board private structure
 *
 * Initializes the clock seqlock and cyclecounter (48-bit mask, shift/mult
 * derived from the HCA core clock frequency), seeds the timecounter from
 * wall-clock time, and registers the PTP hardware clock. On registration
 * failure, ptp_clock is left NULL and an error is logged; timestamping
 * conversion still works without the PHC.
 **/
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common for all ports, skip initialization if
	 * was done for other port.
	 */
	if (mdev->ptp_clock)
		return;

	seqlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
	/* Remember the nominal multiplier as the base for adjfine scaling */
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else if (mdev->ptp_clock) {
		/* may also be NULL when PTP support is compiled out */
		mlx4_info(mdev, "registered PHC clock\n");
	}

}