From 07a8062a47609a58c67692635e99ae6275207dee Mon Sep 17 00:00:00 2001
From: Vadim Fedorenko <vadfed@meta.com>
Date: Wed, 11 Jan 2023 06:58:36 -0800
Subject: [PATCH 2/2] filter: treat negative path_delay as a spike

There should be no negative path delay during normal operation. Let's
filter such values out: the median filter now skips any sample below a
small sanity threshold and keeps returning the current median instead.
With that fix there is no need to use the "best" frequency in case of
holdover; just use the latest mean from the filter.

Signed-off-by: Vadim Fedorenko <vadfed@meta.com>
---
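Notes (not part of the commit message): below is a minimal standalone
sketch of the filtering behaviour described above. It works on plain
int64_t nanoseconds and re-sorts a small window rather than maintaining
the sorted order[] index that mmedian.c uses; WINDOW, MIN_PDELAY_NS,
sample() and calc_pdelay() are illustrative names, not linuxptp API.

/*
 * Sketch (not linuxptp code): a moving-median path delay filter that
 * rejects implausibly small samples and keeps returning the current
 * median, mirroring the mmedian.c change below.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define WINDOW        5
#define MIN_PDELAY_NS 2000 /* same 2000 ns threshold as mmedian.c */

static int64_t samples[WINDOW];
static int cnt, pos;

static int cmp(const void *a, const void *b)
{
    int64_t x = *(const int64_t *)a, y = *(const int64_t *)b;
    return (x > y) - (x < y);
}

/* median of the samples collected so far */
static int64_t calc_pdelay(void)
{
    int64_t sorted[WINDOW];

    for (int i = 0; i < cnt; i++)
        sorted[i] = samples[i];
    qsort(sorted, cnt, sizeof(sorted[0]), cmp);
    if (cnt % 2)
        return sorted[cnt / 2];
    return (sorted[cnt / 2 - 1] + sorted[cnt / 2]) / 2;
}

static int64_t sample(int64_t ns)
{
    /* a sane path never yields a negative or near-zero delay:
     * skip the sample and keep reporting the current median */
    if (cnt && ns < MIN_PDELAY_NS)
        return calc_pdelay();

    samples[pos] = ns;
    pos = (pos + 1) % WINDOW;
    if (cnt < WINDOW)
        cnt++;
    return calc_pdelay();
}

int main(void)
{
    int64_t in[] = { 12000, 13000, -500, 12500, 1500 };

    for (unsigned i = 0; i < sizeof(in) / sizeof(in[0]); i++)
        printf("sample %6lld -> pdelay %lld\n",
               (long long)in[i], (long long)sample(in[i]));
    return 0;
}

As in the patch, the very first sample is always accepted, because
there is no earlier median to fall back to.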
 clock.c   |  4 ++--
 mmedian.c | 21 ++++++++++++++++-----
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/clock.c b/clock.c
index 3787ec7..6c9c12c 100644
--- a/clock.c
+++ b/clock.c
@@ -1973,9 +1973,9 @@ enum servo_state clock_synchronize(struct clock *c, tmv_t ingress, tmv_t origin)
 
 			bool is_spike = llabs(offset) > llabs(max_func(c->max_offset_locked, c->min_offset_locked));
 			if (is_spike) {
-				adj = c->min_offset_freq_mean;
+				adj = c->freq_mean;
 				c->master_offset = nanoseconds_to_tmv(c->max_offset_locked);
-				pr_notice("spike detected => max_offset_locked: %ld, setting offset to min_offset_freq_mean: %lf", c->max_offset_locked, adj);
+				pr_notice("spike detected => max_offset_locked: %ld, setting freq to freq_mean: %lf", c->max_offset_locked, adj);
 				clock_synchronize_locked(c, adj);
 				if (c->offset_skipped_count < c->max_offset_skipped_count) {
 					c->offset_skipped_count++;
diff --git a/mmedian.c b/mmedian.c
index 2383467..50d8b90 100644
--- a/mmedian.c
+++ b/mmedian.c
@@ -21,6 +21,7 @@
 
 #include "mmedian.h"
 #include "filter_private.h"
+#include "print.h"
 
 struct mmedian {
 	struct filter filter;
@@ -41,11 +42,25 @@ static void mmedian_destroy(struct filter *filter)
 	free(m);
 }
 
+static inline tmv_t mmedian_calc_pdelay(const struct mmedian *m)
+{
+	if (m->cnt % 2)
+		return m->samples[m->order[m->cnt / 2]];
+	else
+		return tmv_div(tmv_add(m->samples[m->order[m->cnt / 2 - 1]],
+				       m->samples[m->order[m->cnt / 2]]), 2);
+}
+
 static tmv_t mmedian_sample(struct filter *filter, tmv_t sample)
 {
 	struct mmedian *m = container_of(filter, struct mmedian, filter);
 	int i;
 
+	if (m->cnt && tmv_to_nanoseconds(sample) < 2000) {
+		pr_info("skipping path delay sample %ld", tmv_to_nanoseconds(sample));
+		return mmedian_calc_pdelay(m);
+	}
+
 	m->samples[m->index] = sample;
 	if (m->cnt < m->len) {
 		m->cnt++;
@@ -69,11 +84,7 @@ static tmv_t mmedian_sample(struct filter *filter, tmv_t sample)
 
 	m->index = (1 + m->index) % m->len;
 
-	if (m->cnt % 2)
-		return m->samples[m->order[m->cnt / 2]];
-	else
-		return tmv_div(tmv_add(m->samples[m->order[m->cnt / 2 - 1]],
-				       m->samples[m->order[m->cnt / 2]]), 2);
+	return mmedian_calc_pdelay(m);
 }
 
 static void mmedian_reset(struct filter *filter)
-- 
2.30.2