// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

struct class *ptp_class;

/* private globals */

static dev_t ptp_devt;

static DEFINE_IDA(ptp_clocks_map);

/* time stamp event queue operations */

static inline int queue_free(struct timestamp_event_queue *q)
{
	return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;

	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
	if (!queue_free(queue))
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

	spin_unlock_irqrestore(&queue->lock, flags);
}

/* posix clock implementation */

static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 1;
	return 0;
}

static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	return ptp->info->settime64(ptp->info, tp);
}

static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	int err;

	if (ptp->info->gettimex64)
		err = ptp->info->gettimex64(ptp->info, tp, NULL);
	else
		err = ptp->info->gettime64(ptp->info, tp);
	return err;
}

static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec  = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		long ppb = scaled_ppm_to_ppb(tx->freq);
		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		err = ops->adjfine(ops, tx->freq);
		if (!err)
			ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		if (ops->adjphase) {
			s32 max_phase_adj = ops->getmaxphase(ops);
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			if (offset > max_phase_adj || offset < -max_phase_adj)
				return -ERANGE;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}

static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.poll		= ptp_poll,
	.read		= ptp_read,
};

static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	ida_free(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}

static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
{
	if (info->getcyclesx64)
		return info->getcyclesx64(info, ts, NULL);
	else
		return info->gettime64(info, ts);
}

static void ptp_aux_kworker(struct kthread_work *work)
{
	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
					     aux_work.work);
	struct ptp_clock_info *info = ptp->info;
	long delay;

	delay = info->do_aux_work(info);

	if (delay >= 0)
		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}

/* public interface */

struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	int err = 0, index, major = MAJOR(ptp_devt);
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	err = -ENOMEM;
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (ptp == NULL)
		goto no_memory;

	index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
	if (index < 0) {
		err = index;
		goto no_slot;
	}

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	spin_lock_init(&ptp->tsevq.lock);
	mutex_init(&ptp->tsevq_mux);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	return ptp;

no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	ida_free(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);

static int unregister_vclock(struct device *dev, void *data)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);

	ptp_vclock_unregister(info_to_vclock(ptp->info));
	return 0;
}

int ptp_clock_unregister(struct ptp_clock *ptp)
{
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);

void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct pps_event_time evt;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		break;

	case PTP_CLOCK_EXTTS:
		enqueue_external_timestamp(&ptp->tsevq, event);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);

int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);

int ptp_find_pin(struct ptp_clock *ptp,
		 enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_pin_desc *pin = NULL;
	int i;

	for (i = 0; i < ptp->info->n_pins; i++) {
		if (ptp->info->pin_config[i].func == func &&
		    ptp->info->pin_config[i].chan == chan) {
			pin = &ptp->info->pin_config[i];
			break;
		}
	}

	return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);

int ptp_find_pin_unlocked(struct ptp_clock *ptp,
			  enum ptp_pin_function func, unsigned int chan)
{
	int result;

	mutex_lock(&ptp->pincfg_mux);

	result = ptp_find_pin(ptp, func, chan);

	mutex_unlock(&ptp->pincfg_mux);

	return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);

/* module operations */

static void __exit ptp_exit(void)
{
	class_destroy(ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	ida_destroy(&ptp_clocks_map);
}

static int __init ptp_init(void)
{
	int err;

	ptp_class = class_create("ptp");
	if (IS_ERR(ptp_class)) {
		pr_err("ptp: failed to allocate class\n");
		return PTR_ERR(ptp_class);
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	ptp_class->dev_groups = ptp_groups;
	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_destroy(ptp_class);
	return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");
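
/*
 * Illustrative sketch only: how a PHC driver might use the public interface
 * exported above (ptp_clock_register(), ptp_clock_event(),
 * ptp_clock_unregister()).  The my_phc_* names below are hypothetical and do
 * not exist in this file or elsewhere in the kernel; error handling and
 * locking are omitted for brevity.
 *
 *	static struct ptp_clock_info my_phc_info = {
 *		.owner		= THIS_MODULE,
 *		.name		= "my_phc",
 *		.max_adj	= 500000,
 *		.n_ext_ts	= 1,
 *		.adjfine	= my_phc_adjfine,
 *		.adjtime	= my_phc_adjtime,
 *		.gettime64	= my_phc_gettime64,
 *		.settime64	= my_phc_settime64,
 *		.enable		= my_phc_enable,
 *	};
 *
 *	// At probe time, register the clock under the device's parent:
 *	my_phc->clock = ptp_clock_register(&my_phc_info, &pdev->dev);
 *	if (IS_ERR(my_phc->clock))
 *		return PTR_ERR(my_phc->clock);
 *
 *	// From the interrupt handler, report an external timestamp event;
 *	// ptp_clock_event() queues it and wakes readers of /dev/ptpN:
 *	struct ptp_clock_event event = {
 *		.type		= PTP_CLOCK_EXTTS,
 *		.index		= 0,
 *		.timestamp	= captured_ns,
 *	};
 *	ptp_clock_event(my_phc->clock, &event);
 *
 *	// At remove time:
 *	ptp_clock_unregister(my_phc->clock);
 */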