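ath: track locks created before thread callbacks are installed and
re-initialize them once the callbacks arrive

A mutex set up through ath_mutex_init() while no ath_ops callbacks are
registered only gets the dummy (no-op) implementation.  This patch keeps
such locks on a list (reinstallable_locks) so that a later ath_install(),
typically reached through gcry_control (GCRYCTL_SET_THREAD_CBS, ...),
re-initializes each of them with the real mutex implementation instead
of leaving them non-functional.  ath_mutex_destroy() drops a lock from
the list again while the dummy implementation is still in use, and
ath_install() now takes the early check_only exit only when callbacks
have already been set.

For illustration only, a minimal caller sequence that this change is
meant to cope with could look as follows; it assumes the standard
pthread callback setup of the libgcrypt 1.5 API and is not part of the
patch itself:

    #include <pthread.h>
    #include <errno.h>
    #include <gcrypt.h>

    /* Defines the gcry_threads_pthread callback table (libgcrypt 1.5 API).  */
    GCRY_THREAD_OPTION_PTHREAD_IMPL;

    int
    main (void)
    {
      /* Installing the callbacks triggers ath_install(); with this
         patch, any mutex that was created earlier with the dummy
         implementation is re-initialized with the pthread one.  */
      gcry_control (GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);

      if (!gcry_check_version (GCRYPT_VERSION))
        return 1;
      gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
      return 0;
    }
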
diff -up libgcrypt-1.5.3/src/ath.c.ath-reinstall libgcrypt-1.5.3/src/ath.c
--- libgcrypt-1.5.3/src/ath.c.ath-reinstall	2013-07-25 11:10:04.000000000 +0200
+++ libgcrypt-1.5.3/src/ath.c	2017-02-28 14:37:15.267668432 +0100
@@ -36,7 +36,7 @@
 #include <errno.h>
 
 #include "ath.h"
-
+#include "g10lib.h"
 
 
 /* The interface table.  */
@@ -45,6 +45,13 @@ static struct ath_ops ops;
 /* True if we should use the external callbacks.  */
 static int ops_set;
 
+struct lock_list
+{
+  ath_mutex_t *lock;
+  struct lock_list *next;
+};
+
+static struct lock_list *reinstallable_locks;
 
 /* For the dummy interface.  */
 #define MUTEX_UNLOCKED	((ath_mutex_t) 0)
@@ -62,6 +69,50 @@ static int ops_set;
 /* The lock we take while checking for lazy lock initialization.  */
 static ath_mutex_t check_init_lock = ATH_MUTEX_INITIALIZER;
 
+static void
+add_reinstallable_lock(ath_mutex_t *lock)
+{
+  struct lock_list *ll, *new, **ptr;
+
+  new = gcry_calloc(1, sizeof(*new));
+  if (!new)
+    abort();
+
+  for (ll = reinstallable_locks, ptr = &reinstallable_locks; ll != NULL; ptr = &ll->next, ll = ll->next)
+    {
+      if (ll->lock == lock)
+        {
+          gcry_free(new);
+          return;
+        }
+    }
+
+  new->lock = lock;
+  *ptr = new;
+}
+
+static void
+remove_reinstallable_lock(ath_mutex_t *lock)
+{
+  struct lock_list *ll, **ptr;
+
+  for (ll = reinstallable_locks, ptr = &reinstallable_locks; ll != NULL; ptr = &ll->next, ll = ll->next)
+    {
+      if (ll->lock == lock)
+        {
+          *ptr = ll->next;
+          gcry_free(ll);
+          /* Duplicates are never stored; stop at the first match.  */
+          return;
+        }
+    }
+
+#ifndef NDEBUG
+  /* Lock not found; this should not happen.  */
+  abort();
+#endif
+}
+
 int
 ath_init (void)
 {
@@ -85,7 +136,9 @@ ath_init (void)
 gpg_err_code_t
 ath_install (struct ath_ops *ath_ops, int check_only)
 {
-  if (check_only)
+  gpg_err_code_t err = 0;
+
+  if (check_only && ops_set)
     {
       unsigned int option = 0;
 
@@ -119,7 +172,25 @@ ath_install (struct ath_ops *ath_ops, in
   else
     ops_set = 0;
 
-  return 0;
+  if (ops_set && reinstallable_locks)
+    {
+      struct lock_list *ll;
+
+      ath_init();
+      for (ll = reinstallable_locks; ll != NULL;)
+        {
+          struct lock_list *prev;
+
+          if (ath_mutex_init(ll->lock))
+            err = GPG_ERR_NOT_SUPPORTED;
+          prev = ll;
+          ll = ll->next;
+          gcry_free(prev);
+        }
+      reinstallable_locks = NULL;
+    }
+
+  return err;
 }
 
 
@@ -143,6 +214,8 @@ ath_mutex_init (ath_mutex_t *lock)
 {
   if (ops_set)
     return mutex_init (lock, 0);
+  else
+    add_reinstallable_lock(lock);
 
 #ifndef NDEBUG
   *lock = MUTEX_UNLOCKED;
@@ -168,6 +241,8 @@ ath_mutex_destroy (ath_mutex_t *lock)
       (*ops.mutex_unlock) (&check_init_lock);
       return (*ops.mutex_destroy) (lock);
     }
+  else
+    remove_reinstallable_lock(lock);
 
 #ifndef NDEBUG
   assert (*lock == MUTEX_UNLOCKED);