commit 7e8eba5332671abfd95d06dd191059eded1d2cca
Author: Miroslav Lichvar <mlichvar@redhat.com>
Date:   Mon May 31 11:07:52 2021 +0200

    clock: Reset state when switching port with same best clock.
    
    When the best port is changed, but the ID of the best clock doesn't
    change (e.g. a passive port is activated on link failure), reset the
    current delay and other master/link-specific state to avoid the switch
    throwing the clock off.
    
    Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
    Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>

diff --git a/clock.c b/clock.c
index d428ae2..f14006f 100644
--- a/clock.c
+++ b/clock.c
@@ -1940,7 +1940,7 @@ static void handle_state_decision_event(struct clock *c)
 		best_id = c->dds.clockIdentity;
 	}
 
-	if (!cid_eq(&best_id, &c->best_id)) {
+	if (!cid_eq(&best_id, &c->best_id) || best != c->best) {
 		clock_freq_est_reset(c);
 		tsproc_reset(c->tsproc, 1);
 		if (!tmv_is_zero(c->initial_delay))

commit 262a49b07eaccc0f0237e3cd4df01b185b8f664f
Author: Miroslav Lichvar <mlichvar@redhat.com>
Date:   Mon May 31 11:07:53 2021 +0200

    clock: Reset clock check on best clock/port change.
    
    Reset the clock check when the best clock or port changes, together with
    the other state like current estimated delay and frequency. This avoids
    false positives if the clock is controlled by an external process when
    not synchronized by PTP (e.g. phc2sys -rr).
    
    Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
    Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>

diff --git a/clock.c b/clock.c
index f14006f..7d0f985 100644
--- a/clock.c
+++ b/clock.c
@@ -1942,6 +1942,8 @@ static void handle_state_decision_event(struct clock *c)
 
 	if (!cid_eq(&best_id, &c->best_id) || best != c->best) {
 		clock_freq_est_reset(c);
+		if (c->sanity_check)
+			clockcheck_reset(c->sanity_check);
 		tsproc_reset(c->tsproc, 1);
 		if (!tmv_is_zero(c->initial_delay))
 			tsproc_set_delay(c->tsproc, c->initial_delay);
diff --git a/clockcheck.c b/clockcheck.c
index d48a578..d0b4714 100644
--- a/clockcheck.c
+++ b/clockcheck.c
@@ -47,9 +47,16 @@ struct clockcheck *clockcheck_create(int freq_limit)
 	if (!cc)
 		return NULL;
 	cc->freq_limit = freq_limit;
+	clockcheck_reset(cc);
+	return cc;
+}
+
+void clockcheck_reset(struct clockcheck *cc)
+{
+	cc->freq_known = 0;
 	cc->max_freq = -CHECK_MAX_FREQ;
 	cc->min_freq = CHECK_MAX_FREQ;
-	return cc;
+	cc->last_ts = 0;
 }
 
 int clockcheck_sample(struct clockcheck *cc, uint64_t ts)
diff --git a/clockcheck.h b/clockcheck.h
index 78aca48..1ff86eb 100644
--- a/clockcheck.h
+++ b/clockcheck.h
@@ -33,6 +33,12 @@ struct clockcheck;
  */
 struct clockcheck *clockcheck_create(int freq_limit);
 
+/**
+ * Reset a clock check.
+ * @param cc Pointer to a clock check obtained via @ref clockcheck_create().
+ */
+void clockcheck_reset(struct clockcheck *cc);
+
 /**
  * Perform the sanity check on a time stamp.
  * @param cc Pointer to a clock check obtained via @ref clockcheck_create().
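
A minimal sketch (not part of the patch series) of how a caller could combine
the new clockcheck_reset() with the existing clockcheck API. The 200000000 ppb
limit, the function names, and the servo-reset reaction are illustrative
assumptions; a non-zero return from clockcheck_sample() is taken to mean the
check failed.

    #include <stdint.h>
    #include "clockcheck.h"

    static struct clockcheck *check;

    static void example_init(void)
    {
            /* assumed 200 ppm (200000000 ppb) frequency limit */
            check = clockcheck_create(200000000);
    }

    static void example_rx_timestamp(uint64_t ts_ns)
    {
            /* feed receive timestamps while synchronized; a non-zero
             * result means the clock jumped or drifted past the limit */
            if (check && clockcheck_sample(check, ts_ns)) {
                    /* e.g. reset the servo and resynchronize */
            }
    }

    static void example_best_source_changed(void)
    {
            /* the best clock or port changed: drop the learned frequency
             * bounds so stale state cannot trigger a false positive */
            if (check)
                    clockcheck_reset(check);
    }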

commit e117e37e379556fa23337db2518bb44d8793e039
Author: Miroslav Lichvar <mlichvar@redhat.com>
Date:   Mon May 31 11:07:54 2021 +0200

    port: Don't check timestamps from non-slave ports.
    
    Don't perform the sanity check on receive timestamps from ports in
    non-slave states to avoid false positives in the jbod mode, where
    the timestamps can be generated by different clocks.
    
    Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
    Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>

diff --git a/port.c b/port.c
index b5b775f..ec5c92e 100644
--- a/port.c
+++ b/port.c
@@ -2749,7 +2749,10 @@ static enum fsm_event bc_event(struct port *p, int fd_index)
 	}
 	if (msg_sots_valid(msg)) {
 		ts_add(&msg->hwts.ts, -p->rx_timestamp_offset);
-		clock_check_ts(p->clock, tmv_to_nanoseconds(msg->hwts.ts));
+		if (p->state == PS_SLAVE) {
+			clock_check_ts(p->clock,
+				       tmv_to_nanoseconds(msg->hwts.ts));
+		}
 	}
 
 	switch (msg_type(msg)) {

commit 6df84259647757bc53818a039734f8ff85618c02
Author: Miroslav Lichvar <mlichvar@redhat.com>
Date:   Mon May 31 11:07:55 2021 +0200

    port: Don't renew raw transport.
    
    Renewing of the transport on announce/sync timeout is needed in the
    client-only mode to avoid getting stuck with a broken multicast socket
    when the link goes down.
    
    This shouldn't be necessary with the raw transport. Closing and binding
    of raw sockets can apparently be so slow that it triggers a false
    positive in the clock check.
    
    Reported-by: Amar Subramanyam <asubramanyam@altiostar.com>
    Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>
    Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>

diff --git a/port.c b/port.c
index ec5c92e..c057591 100644
--- a/port.c
+++ b/port.c
@@ -1811,6 +1811,12 @@ static int port_renew_transport(struct port *p)
 	if (!port_is_enabled(p)) {
 		return 0;
 	}
+
+	/* Closing and binding of raw sockets is too slow and unnecessary */
+	if (transport_type(p->trp) == TRANS_IEEE_802_3) {
+		return 0;
+	}
+
 	transport_close(p->trp, &p->fda);
 	port_clear_fda(p, FD_FIRST_TIMER);
 	res = transport_open(p->trp, p->iface, &p->fda, p->timestamping);

commit a082bcd700e4955ebaa00d7039bf4bce92048ac4
Author: Miroslav Lichvar <mlichvar@redhat.com>
Date:   Mon May 31 11:07:56 2021 +0200

    clockcheck: Increase minimum interval.
    
    Increase the minimum check interval to 1 second to measure the frequency
    offset more accurately and, with the default configuration, to make false
    positives caused by a heavily overloaded system less likely.
    
    Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>
    Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>

diff --git a/clockcheck.c b/clockcheck.c
index d0b4714..f0141be 100644
--- a/clockcheck.c
+++ b/clockcheck.c
@@ -23,7 +23,7 @@
 #include "clockcheck.h"
 #include "print.h"
 
-#define CHECK_MIN_INTERVAL 100000000
+#define CHECK_MIN_INTERVAL 1000000000
 #define CHECK_MAX_FREQ 900000000
 
 struct clockcheck {
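
A rough, back-of-the-envelope illustration (not from the patch) of why the
longer interval helps: a one-off timestamping delay of e nanoseconds spread
over a check interval of T nanoseconds looks like a frequency error of about
e/T, so an assumed 1 ms scheduling delay on a heavily loaded system appears
as roughly 10000 ppm over a 100 ms interval but only 1000 ppm over 1 second.

    #include <stdio.h>

    /* apparent frequency error (in ppb) caused by a one-off timestamping
     * delay over a given check interval; the values below are assumed
     * examples, not measurements */
    static long long apparent_freq_err_ppb(long long delay_ns, long long interval_ns)
    {
            return delay_ns * 1000000000LL / interval_ns;
    }

    int main(void)
    {
            long long delay_ns = 1000000; /* 1 ms scheduling delay */

            printf("100 ms interval: %lld ppb\n",
                   apparent_freq_err_ppb(delay_ns, 100000000LL));  /* 10000000 ppb */
            printf("1 s interval:    %lld ppb\n",
                   apparent_freq_err_ppb(delay_ns, 1000000000LL)); /* 1000000 ppb */
            return 0;
    }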