2019-05-20 15:19:02 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2011-04-22 18:03:08 +08:00
|
|
|
/*
|
|
|
|
* PTP 1588 clock support
|
|
|
|
*
|
|
|
|
* Copyright (C) 2010 OMICRON electronics GmbH
|
|
|
|
*/
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/posix-clock.h>
|
|
|
|
#include <linux/pps_kernel.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <linux/uaccess.h>
|
2023-10-12 06:39:57 +08:00
|
|
|
#include <linux/debugfs.h>
|
2024-03-11 22:47:29 +08:00
|
|
|
#include <linux/xarray.h>
|
ptp: introduce ptp auxiliary worker
Many PTP drivers required to perform some asynchronous or periodic work,
like periodically handling PHC counter overflow or handle delayed timestamp
for RX/TX network packets. In most of the cases, such work is implemented
using workqueues. Unfortunately, Kernel workqueues might introduce
significant delay in work scheduling under high system load and on -RT,
which could cause misbehavior of PTP drivers due to internal counter
overflow, for example, and there is no way to tune its execution policy and
priority manually.
Hence, the kthread_worker can be used instead of workqueues, as it creates
a separate named kthread for each worker, and its execution policy and
priority can be configured using the chrt tool.
This problem was reported for two drivers, TI CPSW CPTS and dp83640, so
instead of modifying each of these driver it was proposed to add PTP
auxiliary worker to the PHC subsystem.
The patch adds PTP auxiliary worker in PHC subsystem using kthread_worker
and kthread_delayed_work and introduces two new PHC subsystem APIs:
- long (*do_aux_work)(struct ptp_clock_info *ptp) callback in
ptp_clock_info structure, which driver should assign if it require to
perform asynchronous or periodic work. Driver should return the delay of
the PTP next auxiliary work scheduling time (>=0) or negative value in case
further scheduling is not required.
- int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) which
allows schedule PTP auxiliary work.
The name of kthread_worker thread corresponds PTP PHC device name "ptp%d".
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-07-29 06:30:02 +08:00
|
|
|
#include <uapi/linux/sched/types.h>
|
2011-04-22 18:03:08 +08:00
|
|
|
|
|
|
|
#include "ptp_private.h"
|
|
|
|
|
|
|
|
#define PTP_MAX_ALARMS 4
|
|
|
|
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
|
|
|
|
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
|
|
|
|
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
|
|
|
|
|
2024-03-06 04:11:27 +08:00
|
|
|
/*
 * Device class for all PTP clocks; per-device sysfs attribute groups
 * are supplied by ptp_groups (defined elsewhere in this subsystem).
 */
const struct class ptp_class = {
	.name = "ptp",
	.dev_groups = ptp_groups
};
|
2021-06-30 16:11:55 +08:00
|
|
|
|
2011-04-22 18:03:08 +08:00
|
|
|
/* private globals */
|
|
|
|
|
|
|
|
static dev_t ptp_devt;
|
|
|
|
|
2024-03-11 22:47:29 +08:00
|
|
|
static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
|
2011-04-22 18:03:08 +08:00
|
|
|
|
|
|
|
/* time stamp event queue operations */
|
|
|
|
|
|
|
|
static inline int queue_free(struct timestamp_event_queue *q)
|
|
|
|
{
|
|
|
|
return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Append an external timestamp (or external offset) event to one
 * reader queue. Splits the event's time into seconds + nanoseconds,
 * then pushes it onto the ring under the queue lock; when the ring is
 * full the oldest entry is overwritten (head is advanced).
 *
 * Callable from IRQ context: uses spin_lock_irqsave().
 */
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	struct timespec64 offset_ts;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	/* Normalize the event's time into (seconds, nanoseconds). */
	if (src->type == PTP_CLOCK_EXTTS) {
		seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
	} else if (src->type == PTP_CLOCK_EXTOFF) {
		offset_ts = ns_to_timespec64(src->offset);
		seconds = offset_ts.tv_sec;
		remainder = offset_ts.tv_nsec;
	} else {
		/* Only EXTTS/EXTOFF events may reach this helper. */
		WARN(1, "%s: unknown type %d\n", __func__, src->type);
		return;
	}

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->flags = PTP_EXTTS_EVENT_VALID;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;
	/* Readers distinguish offset events from timestamps by this flag. */
	if (src->type == PTP_CLOCK_EXTOFF)
		dst->flags |= PTP_EXT_OFFSET;

	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
	if (!queue_free(queue))
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

	spin_unlock_irqrestore(&queue->lock, flags);
}
|
|
|
|
|
|
|
|
/* posix clock implementation */
|
|
|
|
|
2017-03-27 03:04:13 +08:00
|
|
|
static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
|
2011-04-22 18:03:08 +08:00
|
|
|
{
|
2011-12-06 04:16:06 +08:00
|
|
|
tp->tv_sec = 0;
|
|
|
|
tp->tv_nsec = 1;
|
|
|
|
return 0;
|
2011-04-22 18:03:08 +08:00
|
|
|
}
|
|
|
|
|
2017-03-27 03:04:13 +08:00
|
|
|
/*
 * posix clock callback: set the hardware clock's time.
 *
 * Refused with -EBUSY while the physical clock is free running
 * (ptp_clock_freerun()); otherwise forwarded to the driver's
 * settime64() operation.
 */
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	return ptp->info->settime64(ptp->info, tp);
}
|
|
|
|
|
2017-03-27 03:04:13 +08:00
|
|
|
static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
|
2011-04-22 18:03:08 +08:00
|
|
|
{
|
|
|
|
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
|
2015-03-30 05:11:53 +08:00
|
|
|
int err;
|
|
|
|
|
2018-11-09 18:14:45 +08:00
|
|
|
if (ptp->info->gettimex64)
|
|
|
|
err = ptp->info->gettimex64(ptp->info, tp, NULL);
|
|
|
|
else
|
|
|
|
err = ptp->info->gettime64(ptp->info, tp);
|
2015-03-30 05:11:53 +08:00
|
|
|
return err;
|
2011-04-22 18:03:08 +08:00
|
|
|
}
|
|
|
|
|
2018-07-03 13:44:21 +08:00
|
|
|
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
|
2011-04-22 18:03:08 +08:00
|
|
|
{
|
|
|
|
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
|
|
|
|
struct ptp_clock_info *ops;
|
|
|
|
int err = -EOPNOTSUPP;
|
|
|
|
|
2022-05-07 04:01:37 +08:00
|
|
|
if (ptp_clock_freerun(ptp)) {
|
|
|
|
pr_err("ptp: physical clock is free running\n");
|
2021-06-30 16:11:53 +08:00
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
2011-04-22 18:03:08 +08:00
|
|
|
ops = ptp->info;
|
|
|
|
|
|
|
|
if (tx->modes & ADJ_SETOFFSET) {
|
2017-03-27 03:04:13 +08:00
|
|
|
struct timespec64 ts;
|
2011-04-22 18:03:08 +08:00
|
|
|
ktime_t kt;
|
|
|
|
s64 delta;
|
|
|
|
|
|
|
|
ts.tv_sec = tx->time.tv_sec;
|
|
|
|
ts.tv_nsec = tx->time.tv_usec;
|
|
|
|
|
|
|
|
if (!(tx->modes & ADJ_NANO))
|
|
|
|
ts.tv_nsec *= 1000;
|
|
|
|
|
|
|
|
if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-03-27 03:04:13 +08:00
|
|
|
kt = timespec64_to_ktime(ts);
|
2011-04-22 18:03:08 +08:00
|
|
|
delta = ktime_to_ns(kt);
|
|
|
|
err = ops->adjtime(ops, delta);
|
|
|
|
} else if (tx->modes & ADJ_FREQUENCY) {
|
2021-06-15 06:24:05 +08:00
|
|
|
long ppb = scaled_ppm_to_ppb(tx->freq);
|
2014-04-27 21:01:27 +08:00
|
|
|
if (ppb > ops->max_adj || ppb < -ops->max_adj)
|
|
|
|
return -ERANGE;
|
2022-11-10 07:09:45 +08:00
|
|
|
err = ops->adjfine(ops, tx->freq);
|
2012-09-22 15:02:01 +08:00
|
|
|
ptp->dialed_frequency = tx->freq;
|
2020-05-02 11:35:36 +08:00
|
|
|
} else if (tx->modes & ADJ_OFFSET) {
|
2020-05-25 02:27:10 +08:00
|
|
|
if (ops->adjphase) {
|
2023-06-13 05:14:56 +08:00
|
|
|
s32 max_phase_adj = ops->getmaxphase(ops);
|
2020-05-25 02:27:10 +08:00
|
|
|
s32 offset = tx->offset;
|
|
|
|
|
|
|
|
if (!(tx->modes & ADJ_NANO))
|
|
|
|
offset *= NSEC_PER_USEC;
|
|
|
|
|
2023-06-13 05:14:56 +08:00
|
|
|
if (offset > max_phase_adj || offset < -max_phase_adj)
|
|
|
|
return -ERANGE;
|
|
|
|
|
2020-05-25 02:27:10 +08:00
|
|
|
err = ops->adjphase(ops, offset);
|
|
|
|
}
|
2012-09-22 15:02:02 +08:00
|
|
|
} else if (tx->modes == 0) {
|
|
|
|
tx->freq = ptp->dialed_frequency;
|
|
|
|
err = 0;
|
2011-04-22 18:03:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Operations handed to the posix clock layer for every registered PTP
 * clock; the chardev entry points (ioctl/open/release/poll/read) are
 * implemented in the ptp chardev code (see ptp_private.h).
 */
static struct posix_clock_operations ptp_clock_ops = {
	.owner = THIS_MODULE,
	.clock_adjtime = ptp_clock_adjtime,
	.clock_gettime = ptp_clock_gettime,
	.clock_getres = ptp_clock_getres,
	.clock_settime = ptp_clock_settime,
	.ioctl = ptp_ioctl,
	.open = ptp_open,
	.release = ptp_release,
	.poll = ptp_poll,
	.read = ptp_read,
};
|
|
|
|
|
2019-12-27 10:26:27 +08:00
|
|
|
/*
 * Device release callback: invoked when the last reference to the
 * embedded struct device is dropped. Frees everything that
 * ptp_clock_register() allocated, including the initial timestamp
 * event queue, and finally the clock structure itself.
 */
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
	struct timestamp_event_queue *tsevq;
	unsigned long flags;

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	/* Delete first entry */
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
				 qlist);
	list_del(&tsevq->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	/* Free the queue's channel mask and the queue outside the lock. */
	bitmap_free(tsevq->mask);
	kfree(tsevq);
	debugfs_remove(ptp->debugfs_root);
	/* Only now may the index be recycled for a new clock. */
	xa_erase(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}
|
|
|
|
|
2022-05-07 04:01:37 +08:00
|
|
|
static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
|
|
|
|
{
|
|
|
|
if (info->getcyclesx64)
|
|
|
|
return info->getcyclesx64(info, ts, NULL);
|
|
|
|
else
|
|
|
|
return info->gettime64(info, ts);
|
|
|
|
}
|
|
|
|
|
ptp: introduce ptp auxiliary worker
Many PTP drivers required to perform some asynchronous or periodic work,
like periodically handling PHC counter overflow or handle delayed timestamp
for RX/TX network packets. In most of the cases, such work is implemented
using workqueues. Unfortunately, Kernel workqueues might introduce
significant delay in work scheduling under high system load and on -RT,
which could cause misbehavior of PTP drivers due to internal counter
overflow, for example, and there is no way to tune its execution policy and
priority manually.
Hence, the kthread_worker can be used instead of workqueues, as it creates
a separate named kthread for each worker, and its execution policy and
priority can be configured using the chrt tool.
This problem was reported for two drivers, TI CPSW CPTS and dp83640, so
instead of modifying each of these driver it was proposed to add PTP
auxiliary worker to the PHC subsystem.
The patch adds PTP auxiliary worker in PHC subsystem using kthread_worker
and kthread_delayed_work and introduces two new PHC subsystem APIs:
- long (*do_aux_work)(struct ptp_clock_info *ptp) callback in
ptp_clock_info structure, which driver should assign if it require to
perform asynchronous or periodic work. Driver should return the delay of
the PTP next auxiliary work scheduling time (>=0) or negative value in case
further scheduling is not required.
- int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) which
allows schedule PTP auxiliary work.
The name of kthread_worker thread corresponds PTP PHC device name "ptp%d".
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-07-29 06:30:02 +08:00
|
|
|
static void ptp_aux_kworker(struct kthread_work *work)
|
|
|
|
{
|
|
|
|
struct ptp_clock *ptp = container_of(work, struct ptp_clock,
|
|
|
|
aux_work.work);
|
|
|
|
struct ptp_clock_info *info = ptp->info;
|
|
|
|
long delay;
|
|
|
|
|
|
|
|
delay = info->do_aux_work(info);
|
|
|
|
|
|
|
|
if (delay >= 0)
|
|
|
|
kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
|
|
|
|
}
|
|
|
|
|
2011-04-22 18:03:08 +08:00
|
|
|
/* public interface */
|
|
|
|
|
2012-09-22 15:02:03 +08:00
|
|
|
/**
 * ptp_clock_register() - create and register a new PTP clock
 * @info:   driver-supplied clock description and operation callbacks
 * @parent: parent device, or NULL
 *
 * Allocates the clock, assigns it an index from ptp_clocks_map,
 * creates the initial timestamp event queue, optionally spawns the
 * auxiliary kthread worker (when info->do_aux_work is set), registers
 * the PPS source (when info->pps is set), and finally registers the
 * posix clock device. On failure every step is unwound in reverse
 * via the goto chain below.
 *
 * Return: the new clock, or an ERR_PTR() value on failure.
 */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	struct timestamp_event_queue *queue = NULL;
	int err, index, major = MAJOR(ptp_devt);
	char debugfsname[16];
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto no_memory;
	}

	/* Reserve a device index; also makes the clock findable by id. */
	err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
		       GFP_KERNEL);
	if (err)
		goto no_slot;

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	/* One event queue exists up front; readers may add more later. */
	INIT_LIST_HEAD(&ptp->tsevqs);
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		err = -ENOMEM;
		goto no_memory_queue;
	}
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_lock_init(&ptp->tsevqs_lock);
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		err = -ENOMEM;
		goto no_memory_bitmap;
	}
	/* The initial queue accepts events from every channel. */
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	/* Wire up default cycle-counter ops when the driver omits them. */
	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	/* Spawn the auxiliary worker thread, named after the device. */
	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	/* Only physical clocks can host virtual clocks. */
	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = &ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		/*
		 * Once device_initialize() has run, cleanup of the
		 * allocations is owned by ptp_clock_release() via
		 * put_device(); only the PPS source and kworker need
		 * explicit teardown here.
		 */
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	/* Debugfs initialization */
	snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
	ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

	return ptp;

/* Error unwinding, in reverse order of the setup steps above. */
no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	bitmap_free(queue->mask);
no_memory_bitmap:
	list_del(&queue->qlist);
	kfree(queue);
no_memory_queue:
	xa_erase(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
|
|
|
|
EXPORT_SYMBOL(ptp_clock_register);
|
|
|
|
|
2022-02-02 17:33:55 +08:00
|
|
|
static int unregister_vclock(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
struct ptp_clock *ptp = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
ptp_vclock_unregister(info_to_vclock(ptp->info));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-04-22 18:03:08 +08:00
|
|
|
/**
 * ptp_clock_unregister() - disable and remove a PTP clock
 * @ptp: the clock to remove
 *
 * Unregisters any virtual clocks still attached, marks the clock
 * defunct and wakes blocked readers, stops the auxiliary worker,
 * releases the PPS source, and removes the posix clock. Memory is
 * freed later by ptp_clock_release() once the device refcount drops.
 */
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	/* Readers see ->defunct and stop waiting for new events. */
	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
|
|
|
|
EXPORT_SYMBOL(ptp_clock_unregister);
|
|
|
|
|
|
|
|
/**
 * ptp_clock_event() - notify the PTP layer about an event
 * @ptp:   the clock that produced the event
 * @event: the event description
 *
 * EXTTS/EXTOFF events are fanned out to every reader queue whose
 * channel mask includes the event's index; PPS and PPSUSR events are
 * forwarded to the registered PPS source. Safe to call from driver
 * interrupt context (the queue list lock is taken with irqsave).
 */
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct timestamp_event_queue *tsevq;
	struct pps_event_time evt;
	unsigned long flags;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		break;

	case PTP_CLOCK_EXTTS:
	case PTP_CLOCK_EXTOFF:
		/* Enqueue timestamp on selected queues */
		spin_lock_irqsave(&ptp->tsevqs_lock, flags);
		list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
			if (test_bit((unsigned int)event->index, tsevq->mask))
				enqueue_external_timestamp(tsevq, event);
		}
		spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		/* Timestamp taken here, at event delivery. */
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		/* Driver supplied its own timestamps in pps_times. */
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
|
|
|
|
EXPORT_SYMBOL(ptp_clock_event);
|
|
|
|
|
2012-04-04 06:59:16 +08:00
|
|
|
/* Return the device index of a clock, i.e. the N in its "ptpN" name. */
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
|
|
|
|
EXPORT_SYMBOL(ptp_clock_index);
|
|
|
|
|
2014-03-21 05:21:52 +08:00
|
|
|
int ptp_find_pin(struct ptp_clock *ptp,
|
|
|
|
enum ptp_pin_function func, unsigned int chan)
|
|
|
|
{
|
|
|
|
struct ptp_pin_desc *pin = NULL;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ptp->info->n_pins; i++) {
|
|
|
|
if (ptp->info->pin_config[i].func == func &&
|
|
|
|
ptp->info->pin_config[i].chan == chan) {
|
|
|
|
pin = &ptp->info->pin_config[i];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return pin ? i : -1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ptp_find_pin);
|
|
|
|
|
2020-03-29 22:55:10 +08:00
|
|
|
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
|
|
|
|
enum ptp_pin_function func, unsigned int chan)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
|
|
|
|
mutex_lock(&ptp->pincfg_mux);
|
|
|
|
|
|
|
|
result = ptp_find_pin(ptp, func, chan);
|
|
|
|
|
|
|
|
mutex_unlock(&ptp->pincfg_mux);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ptp_find_pin_unlocked);
|
|
|
|
|
ptp: introduce ptp auxiliary worker
Many PTP drivers required to perform some asynchronous or periodic work,
like periodically handling PHC counter overflow or handle delayed timestamp
for RX/TX network packets. In most of the cases, such work is implemented
using workqueues. Unfortunately, Kernel workqueues might introduce
significant delay in work scheduling under high system load and on -RT,
which could cause misbehavior of PTP drivers due to internal counter
overflow, for example, and there is no way to tune its execution policy and
priority manually.
Hence, the kthread_worker can be used instead of workqueues, as it creates
a separate named kthread for each worker, and its execution policy and
priority can be configured using the chrt tool.
This problem was reported for two drivers, TI CPSW CPTS and dp83640, so
instead of modifying each of these driver it was proposed to add PTP
auxiliary worker to the PHC subsystem.
The patch adds PTP auxiliary worker in PHC subsystem using kthread_worker
and kthread_delayed_work and introduces two new PHC subsystem APIs:
- long (*do_aux_work)(struct ptp_clock_info *ptp) callback in
ptp_clock_info structure, which driver should assign if it require to
perform asynchronous or periodic work. Driver should return the delay of
the PTP next auxiliary work scheduling time (>=0) or negative value in case
further scheduling is not required.
- int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) which
allows schedule PTP auxiliary work.
The name of kthread_worker thread corresponds PTP PHC device name "ptp%d".
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-07-29 06:30:02 +08:00
|
|
|
/*
 * Schedule (or reschedule) the clock's auxiliary work to run after
 * @delay jiffies. Requires the clock to have been registered with a
 * do_aux_work callback, which is what creates ptp->kworker.
 */
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
|
|
|
|
EXPORT_SYMBOL(ptp_schedule_worker);
|
|
|
|
|
2019-12-27 21:02:28 +08:00
|
|
|
/*
 * Cancel pending auxiliary work and wait for a running instance to
 * finish before returning.
 */
void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
|
|
|
|
EXPORT_SYMBOL(ptp_cancel_worker_sync);
|
|
|
|
|
2011-04-22 18:03:08 +08:00
|
|
|
/* module operations */
|
|
|
|
|
|
|
|
/* Module exit: undo ptp_init() in reverse order. */
static void __exit ptp_exit(void)
{
	class_unregister(&ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	xa_destroy(&ptp_clocks_map);
}
|
|
|
|
|
|
|
|
/*
 * Subsystem init: register the "ptp" device class and reserve the
 * chardev major/minor region used for /dev/ptpN nodes.
 */
static int __init ptp_init(void)
{
	int err;

	err = class_register(&ptp_class);
	if (err) {
		pr_err("ptp: failed to allocate class\n");
		return err;
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_unregister(&ptp_class);
	return err;
}
|
|
|
|
|
|
|
|
subsys_initcall(ptp_init);
|
|
|
|
module_exit(ptp_exit);
|
|
|
|
|
2012-03-17 06:39:29 +08:00
|
|
|
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
|
2011-04-22 18:03:08 +08:00
|
|
|
MODULE_DESCRIPTION("PTP clocks support");
|
|
|
|
MODULE_LICENSE("GPL");
|