// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

struct class *ptp_class;

/* private globals */

static dev_t ptp_devt;

static DEFINE_IDA(ptp_clocks_map);

/* time stamp event queue operations */

static inline int queue_free(struct timestamp_event_queue *q)
{
	return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;

	if (!queue_free(queue))
		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;

	queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;

	spin_unlock_irqrestore(&queue->lock, flags);
}

/* posix clock implementation */

static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 1;
	return 0;
}

static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	if (ptp_vclock_in_use(ptp)) {
		pr_err("ptp: virtual clock in use\n");
		return -EBUSY;
	}

	return ptp->info->settime64(ptp->info, tp);
}

static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	int err;

	if (ptp->info->gettimex64)
		err = ptp->info->gettimex64(ptp->info, tp, NULL);
	else
		err = ptp->info->gettime64(ptp->info, tp);
	return err;
}

static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	if (ptp_vclock_in_use(ptp)) {
		pr_err("ptp: virtual clock in use\n");
		return -EBUSY;
	}

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		long ppb = scaled_ppm_to_ppb(tx->freq);

		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		if (ops->adjfine)
			err = ops->adjfine(ops, tx->freq);
		else
			err = ops->adjfreq(ops, ppb);
		ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		if (ops->adjphase) {
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}

static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.poll		= ptp_poll,
	.read		= ptp_read,
};
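/*
 * Illustrative sketch, not part of this driver: the operations above are
 * reached from user space through the dynamic posix clock of a PTP device.
 * A minimal reader, along the lines of the kernel's
 * tools/testing/selftests/ptp/testptp.c (the device path is an assumption):
 *
 *	#include <fcntl.h>
 *	#include <time.h>
 *
 *	#define CLOCKFD 3
 *	#define FD_TO_CLOCKID(fd)	((~(clockid_t) (fd) << 3) | CLOCKFD)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	clockid_t clkid = FD_TO_CLOCKID(fd);
 *	struct timespec ts;
 *
 *	clock_gettime(clkid, &ts);	(ends up in ptp_clock_gettime() above)
 */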
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);

	ptp_cleanup_pin_groups(ptp);
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	ida_simple_remove(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}

static void ptp_aux_kworker(struct kthread_work *work)
{
	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
					     aux_work.work);
	struct ptp_clock_info *info = ptp->info;
	long delay;

	delay = info->do_aux_work(info);

	if (delay >= 0)
		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}

/* public interface */

struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	int err = 0, index, major = MAJOR(ptp_devt);
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	err = -ENOMEM;
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (ptp == NULL)
		goto no_memory;

	index = ida_simple_get(&ptp_clocks_map, 0, MINORMASK + 1, GFP_KERNEL);
	if (index < 0) {
		err = index;
		goto no_slot;
	}

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	spin_lock_init(&ptp->tsevq.lock);
	mutex_init(&ptp->tsevq_mux);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;

		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		pr_err("failed to create posix clock\n");
		goto no_clock;
	}

	return ptp;

no_clock:
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);
no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	ida_simple_remove(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);
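/*
 * Illustrative sketch of driver-side registration; the "foo" names and
 * callbacks are placeholders, not taken from any in-tree driver.  A driver
 * typically fills in a struct ptp_clock_info and registers it from its
 * probe routine:
 *
 *	static struct ptp_clock_info foo_ptp_info = {
 *		.owner		= THIS_MODULE,
 *		.name		= "foo timer",
 *		.max_adj	= 512000,
 *		.n_ext_ts	= 2,
 *		.adjfine	= foo_adjfine,
 *		.adjtime	= foo_adjtime,
 *		.gettime64	= foo_gettime64,
 *		.settime64	= foo_settime64,
 *		.enable		= foo_enable,
 *	};
 *
 *	foo->ptp_clock = ptp_clock_register(&foo_ptp_info, &pdev->dev);
 *	if (IS_ERR(foo->ptp_clock))
 *		return PTR_ERR(foo->ptp_clock);
 */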
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	if (ptp_vclock_in_use(ptp)) {
		pr_err("ptp: virtual clock in use\n");
		return -EBUSY;
	}

	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	kfree(ptp->vclock_index);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);

void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct pps_event_time evt;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		break;

	case PTP_CLOCK_EXTTS:
		enqueue_external_timestamp(&ptp->tsevq, event);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);
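/*
 * Illustrative sketch of event delivery; names are placeholders.  A driver
 * that captured an external timestamp in its interrupt handler would hand
 * it to the timestamp queue above roughly like this:
 *
 *	struct ptp_clock_event ev;
 *
 *	ev.type = PTP_CLOCK_EXTTS;
 *	ev.index = 0;				(EXTTS channel that fired)
 *	ev.timestamp = foo_read_event_ns(foo);	(nanoseconds value)
 *	ptp_clock_event(foo->ptp_clock, &ev);
 */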
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);

int ptp_find_pin(struct ptp_clock *ptp,
		 enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_pin_desc *pin = NULL;
	int i;

	for (i = 0; i < ptp->info->n_pins; i++) {
		if (ptp->info->pin_config[i].func == func &&
		    ptp->info->pin_config[i].chan == chan) {
			pin = &ptp->info->pin_config[i];
			break;
		}
	}

	return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);

int ptp_find_pin_unlocked(struct ptp_clock *ptp,
			  enum ptp_pin_function func, unsigned int chan)
{
	int result;

	mutex_lock(&ptp->pincfg_mux);

	result = ptp_find_pin(ptp, func, chan);

	mutex_unlock(&ptp->pincfg_mux);

	return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);

/* module operations */

static void __exit ptp_exit(void)
{
	class_destroy(ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	ida_destroy(&ptp_clocks_map);
}

static int __init ptp_init(void)
{
	int err;

	ptp_class = class_create(THIS_MODULE, "ptp");
	if (IS_ERR(ptp_class)) {
		pr_err("ptp: failed to allocate class\n");
		return PTR_ERR(ptp_class);
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	ptp_class->dev_groups = ptp_groups;
	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_destroy(ptp_class);
	return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");