diff --git a/SOURCES/0001-Fix-AJP-connector-migration.patch b/SOURCES/0001-Fix-AJP-connector-migration.patch
new file mode 100644
index 0000000..de2299d
--- /dev/null
+++ b/SOURCES/0001-Fix-AJP-connector-migration.patch
@@ -0,0 +1,217 @@
+From 8a8fc41a10ffb20e9e4902a9e9f74b2f05948b7a Mon Sep 17 00:00:00 2001
+From: "Endi S. Dewata" <edewata@redhat.com>
+Date: Wed, 3 Nov 2021 20:46:46 -0500
+Subject: [PATCH] Fix AJP connector migration
+
+In commit e70373ab131aba810f318c1d917896392b49ff4b the AJP
+connector migration code for Tomcat 9.0.31 in the pki-server
+migrate CLI was converted into an upgrade script that would
+run regardless of the Tomcat version, which caused a problem
+on platforms that only have older Tomcat versions.
+
+To fix the problem, the upgrade script has been converted back
+into pki-server migrate, which will check the Tomcat version
+before performing the migration. The server.xml has also been
+reverted to use the old AJP connectors by default.
+
+Whenever the server is restarted, pki-server migrate will run,
+so the AJP connectors will be migrated automatically in case
+Tomcat is upgraded to a newer version.
+
+https://bugzilla.redhat.com/show_bug.cgi?id=2029023
+---
+ base/server/python/pki/server/cli/migrate.py  | 61 +++++++++++++++++
+ .../upgrade/10.11.0/04-UpdateAJPConnectors.py | 67 -------------------
+ ...lowLinking.py => 04-UpdateAllowLinking.py} |  0
+ ...UpdateJavaHome.py => 05-UpdateJavaHome.py} |  0
+ base/tomcat-9.0/conf/server.xml               |  4 +-
+ 5 files changed, 63 insertions(+), 69 deletions(-)
+ delete mode 100644 base/server/upgrade/10.11.0/04-UpdateAJPConnectors.py
+ rename base/server/upgrade/10.11.0/{05-UpdateAllowLinking.py => 04-UpdateAllowLinking.py} (100%)
+ rename base/server/upgrade/10.11.0/{06-UpdateJavaHome.py => 05-UpdateJavaHome.py} (100%)
+
+diff --git a/base/server/python/pki/server/cli/migrate.py b/base/server/python/pki/server/cli/migrate.py
+index 256b83c845..2005004c4e 100644
+--- a/base/server/python/pki/server/cli/migrate.py
++++ b/base/server/python/pki/server/cli/migrate.py
+@@ -23,6 +23,7 @@ from __future__ import print_function
+ 
+ import getopt
+ import logging
++import re
+ import sys
+ 
+ from lxml import etree
+@@ -96,9 +97,69 @@ class MigrateCLI(pki.cli.CLI):
+ 
+             instance.load()
+             instance.init()
++            instances = [instance]
+ 
+         else:
+             instances = pki.server.instance.PKIInstance.instances()
+ 
+             for instance in instances:
+                 instance.init()
++
++        # update AJP connectors for Tomcat 9.0.31 or later
++
++        tomcat_version = pki.server.Tomcat.get_version()
++        if tomcat_version >= pki.util.Version('9.0.31'):
++
++            for instance in instances:
++                self.update_ajp_connectors(instance)
++
++    def update_ajp_connectors(self, instance):
++
++        logger.info('Updating AJP connectors in %s', instance.server_xml)
++
++        document = etree.parse(instance.server_xml, self.parser)
++        server = document.getroot()
++
++        # replace 'requiredSecret' with 'secret' in comments
++
++        services = server.findall('Service')
++        for service in services:
++
++            children = list(service)
++            for child in children:
++
++                if not isinstance(child, etree._Comment):  # pylint: disable=protected-access
++                    # not a comment -> skip
++                    continue
++
++                if 'protocol="AJP/1.3"' not in child.text:
++                    # not an AJP connector -> skip
++                    continue
++
++                child.text = re.sub(r'requiredSecret=',
++                                    r'secret=',
++                                    child.text,
++                                    flags=re.MULTILINE)
++
++        # replace 'requiredSecret' with 'secret' in Connectors
++
++        connectors = server.findall('Service/Connector')
++        for connector in connectors:
++
++            if connector.get('protocol') != 'AJP/1.3':
++                # not an AJP connector -> skip
++                continue
++
++            if connector.get('secret'):
++                # already has a 'secret' -> skip
++                continue
++
++            if connector.get('requiredSecret') is None:
++                # does not have a 'requiredSecret' -> skip
++                continue
++
++            value = connector.attrib.pop('requiredSecret')
++            connector.set('secret', value)
++
++        with open(instance.server_xml, 'wb') as f:
++            document.write(f, pretty_print=True, encoding='utf-8')
+diff --git a/base/server/upgrade/10.11.0/04-UpdateAJPConnectors.py b/base/server/upgrade/10.11.0/04-UpdateAJPConnectors.py
+deleted file mode 100644
+index 6e7bbdae24..0000000000
+--- a/base/server/upgrade/10.11.0/04-UpdateAJPConnectors.py
++++ /dev/null
+@@ -1,67 +0,0 @@
+-#
+-# Copyright Red Hat, Inc.
+-#
+-# SPDX-License-Identifier: GPL-2.0-or-later
+-#
+-from __future__ import absolute_import
+-import logging
+-from lxml import etree
+-import re
+-
+-import pki
+-
+-logger = logging.getLogger(__name__)
+-
+-
+-class UpdateAJPConnectors(pki.server.upgrade.PKIServerUpgradeScriptlet):
+-
+-    def __init__(self):
+-        super(UpdateAJPConnectors, self).__init__()
+-        self.message = 'Update AJP connectors in server.xml'
+-
+-        self.parser = etree.XMLParser(remove_blank_text=True)
+-
+-    def upgrade_instance(self, instance):
+-
+-        logger.info('Updating %s', instance.server_xml)
+-        self.backup(instance.server_xml)
+-
+-        document = etree.parse(instance.server_xml, self.parser)
+-        server = document.getroot()
+-
+-        logger.info('Renaming requiredSecret to secret')
+-
+-        services = server.findall('Service')
+-        for service in services:
+-
+-            children = list(service)
+-            for child in children:
+-
+-                if isinstance(child, etree._Comment):  # pylint: disable=protected-access
+-                    if 'protocol="AJP/1.3"' in child.text:
+-                        child.text = re.sub(r'requiredSecret=',
+-                                            r'secret=',
+-                                            child.text,
+-                                            flags=re.MULTILINE)
+-
+-        connectors = server.findall('Service/Connector')
+-        for connector in connectors:
+-
+-            if connector.get('protocol') != 'AJP/1.3':
+-                # Only modify AJP connectors.
+-                continue
+-
+-            if connector.get('secret'):
+-                # Nothing to migrate because the secret attribute already
+-                # exists.
+-                continue
+-
+-            if connector.get('requiredSecret') is None:
+-                # No requiredSecret field either; nothing to do.
+-                continue
+-
+-            connector.set('secret', connector.get('requiredSecret'))
+-            connector.attrib.pop('requiredSecret', None)
+-
+-        with open(instance.server_xml, 'wb') as f:
+-            document.write(f, pretty_print=True, encoding='utf-8')
+diff --git a/base/server/upgrade/10.11.0/05-UpdateAllowLinking.py b/base/server/upgrade/10.11.0/04-UpdateAllowLinking.py
+similarity index 100%
+rename from base/server/upgrade/10.11.0/05-UpdateAllowLinking.py
+rename to base/server/upgrade/10.11.0/04-UpdateAllowLinking.py
+diff --git a/base/server/upgrade/10.11.0/06-UpdateJavaHome.py b/base/server/upgrade/10.11.0/05-UpdateJavaHome.py
+similarity index 100%
+rename from base/server/upgrade/10.11.0/06-UpdateJavaHome.py
+rename to base/server/upgrade/10.11.0/05-UpdateJavaHome.py
+diff --git a/base/tomcat-9.0/conf/server.xml b/base/tomcat-9.0/conf/server.xml
+index 528300fd27..d6f3bb7ff0 100644
+--- a/base/tomcat-9.0/conf/server.xml
++++ b/base/tomcat-9.0/conf/server.xml
+@@ -190,12 +190,12 @@ Tomcat Port         = [TOMCAT_SERVER_PORT] (for shutdown)
+                protocol="AJP/1.3"
+                redirectPort="[PKI_AJP_REDIRECT_PORT]"
+                address="[PKI_AJP_HOST_IPv4]"
+-               secret="[PKI_AJP_SECRET]" />
++               requiredSecret="[PKI_AJP_SECRET]" />
+     <Connector port="[PKI_AJP_PORT]"
+                protocol="AJP/1.3"
+                redirectPort="[PKI_AJP_REDIRECT_PORT]"
+                address="[PKI_AJP_HOST_IPv6]"
+-               secret="[PKI_AJP_SECRET]" />
++               requiredSecret="[PKI_AJP_SECRET]" />
+ [PKI_CLOSE_AJP_PORT_COMMENT]
+ 
+ 
+-- 
+2.33.1
+
diff --git a/SOURCES/0001-Fix-replica-reinstallation.patch b/SOURCES/0001-Fix-replica-reinstallation.patch
new file mode 100644
index 0000000..b56a3f2
--- /dev/null
+++ b/SOURCES/0001-Fix-replica-reinstallation.patch
@@ -0,0 +1,289 @@
+From 5d377f31292da71f6ec4a29b13a66a9bea967102 Mon Sep 17 00:00:00 2001
+From: "Endi S. Dewata" <edewata@redhat.com>
+Date: Tue, 2 Nov 2021 14:46:02 -0500
+Subject: [PATCH] Fix replica reinstallation
+
+pkispawn and pkidestroy have been modified to ignore failures
+caused by adding an entry or attribute that already exists,
+and to check whether a file exists before removing it during
+replica removal and reinstallation.
+
+One of the CA clone tests has been modified to test
+removing and reinstalling a replica.
+
+Resolves: https://github.com/dogtagpki/pki/issues/3544
+---
+ .github/workflows/ca-tests.yml                |  11 ++
+ .../python/pki/server/deployment/__init__.py  |  39 +++++--
+ .../scriptlets/webapp_deployment.py           |  19 +--
+ .../cms/servlet/csadmin/LDAPConfigurator.java | 110 +++++++++++-------
+ 4 files changed, 116 insertions(+), 63 deletions(-)
+
+diff --git a/.github/workflows/ca-tests.yml b/.github/workflows/ca-tests.yml
+index 4832e73c65..fffcb9c3e4 100644
+--- a/.github/workflows/ca-tests.yml
++++ b/.github/workflows/ca-tests.yml
+@@ -1137,6 +1137,17 @@ jobs:
+               --pkcs12-password-file ${PKIDIR}/pkcs12_password.conf
+           docker exec secondary pki -n caadmin ca-user-show caadmin
+ 
++      - name: Remove CA from secondary PKI container
++        run: |
++          docker exec secondary pkidestroy -i pki-tomcat -s CA -v
++
++      - name: Re-install CA in secondary PKI container
++        run: |
++          docker exec secondary pkispawn \
++              -f /usr/share/pki/server/examples/installation/ca-secure-ds-secondary.cfg \
++              -s CA \
++              -v
++
+       - name: Gather artifacts from primary container
+         if: always()
+         run: |
+diff --git a/base/server/python/pki/server/deployment/__init__.py b/base/server/python/pki/server/deployment/__init__.py
+index 6eb5b0a78a..d179718dd6 100644
+--- a/base/server/python/pki/server/deployment/__init__.py
++++ b/base/server/python/pki/server/deployment/__init__.py
+@@ -1074,26 +1074,41 @@ class PKIDeployer:
+         secure_port = server_config.get_secure_port()
+ 
+         uid = 'CA-%s-%s' % (self.mdict['pki_hostname'], secure_port)
+-
+         logger.info('Adding %s', uid)
+-        subsystem.add_user(
+-            uid,
+-            full_name=uid,
+-            user_type='agentType',
+-            state='1')
+ 
+-        logger.info('Adding subsystem certificate into %s', uid)
++        try:
++            subsystem.add_user(
++                uid,
++                full_name=uid,
++                user_type='agentType',
++                state='1')
++        except Exception:    # pylint: disable=W0703
++            logger.warning('Unable to add %s', uid)
++            # TODO: ignore error only if user already exists
++
+         cert_data = pki.nssdb.convert_cert(
+             cert['data'],
+             'base64',
+             'pem')
+-        subsystem.add_user_cert(
+-            uid,
+-            cert_data=cert_data.encode(),
+-            cert_format='PEM')
++
++        logger.info('Adding certificate for %s', uid)
++
++        try:
++            subsystem.add_user_cert(
++                uid,
++                cert_data=cert_data.encode(),
++                cert_format='PEM')
++        except Exception:    # pylint: disable=W0703
++            logger.warning('Unable to add certificate for %s', uid)
++            # TODO: ignore error only if user cert already exists
+ 
+         logger.info('Adding %s into Subsystem Group', uid)
+-        subsystem.add_group_member('Subsystem Group', uid)
++
++        try:
++            subsystem.add_group_member('Subsystem Group', uid)
++        except Exception:    # pylint: disable=W0703
++            logger.warning('Unable to add %s into Subsystem Group', uid)
++            # TODO: ignore error only if user already exists in the group
+ 
+     def backup_keys(self, instance, subsystem):
+ 
+diff --git a/base/server/python/pki/server/deployment/scriptlets/webapp_deployment.py b/base/server/python/pki/server/deployment/scriptlets/webapp_deployment.py
+index 342477028a..f9e73fd069 100644
+--- a/base/server/python/pki/server/deployment/scriptlets/webapp_deployment.py
++++ b/base/server/python/pki/server/deployment/scriptlets/webapp_deployment.py
+@@ -60,12 +60,13 @@ class PkiScriptlet(pkiscriptlet.AbstractBasePkiScriptlet):
+ 
+         logger.info('Undeploying /%s web application', deployer.mdict['pki_subsystem'].lower())
+ 
+-        # Delete <instance>/Catalina/localhost/<subsystem>.xml
+-        pki.util.remove(
+-            path=os.path.join(
+-                deployer.mdict['pki_instance_configuration_path'],
+-                "Catalina",
+-                "localhost",
+-                deployer.mdict['pki_subsystem'].lower() + ".xml"),
+-            force=deployer.mdict['pki_force_destroy']
+-        )
++        # Delete <instance>/Catalina/localhost/<subsystem>.xml if it exists
++
++        context_xml = os.path.join(
++            deployer.mdict['pki_instance_configuration_path'],
++            'Catalina',
++            'localhost',
++            deployer.mdict['pki_subsystem'].lower() + '.xml')
++
++        if os.path.exists(context_xml):
++            pki.util.remove(context_xml)
+diff --git a/base/server/src/main/java/com/netscape/cms/servlet/csadmin/LDAPConfigurator.java b/base/server/src/main/java/com/netscape/cms/servlet/csadmin/LDAPConfigurator.java
+index 651d166321..1e0364cfea 100644
+--- a/base/server/src/main/java/com/netscape/cms/servlet/csadmin/LDAPConfigurator.java
++++ b/base/server/src/main/java/com/netscape/cms/servlet/csadmin/LDAPConfigurator.java
+@@ -661,26 +661,35 @@ public class LDAPConfigurator {
+ 
+         try {
+             connection.add(entry);
++            // replication manager added -> done
++            return;
+ 
+         } catch (LDAPException e) {
+-            if (e.getLDAPResultCode() == LDAPException.ENTRY_ALREADY_EXISTS) {
+-                logger.warn("Entry already exists: " + dn);
++            if (e.getLDAPResultCode() != LDAPException.ENTRY_ALREADY_EXISTS) {
++                logger.error("Unable to add " + dn + ": " + e.getMessage(), e);
++                throw e;
++            }
++            logger.warn("Replication manager already exists: " + dn);
++        }
+ 
+-                try {
+-                    logger.info("Deleting " + dn);
+-                    connection.delete(dn);
++        logger.warn("Deleting existing replication manager: " + dn);
+ 
+-                    logger.info("Re-adding " + dn);
+-                    connection.add(entry);
++        try {
++            connection.delete(dn);
+ 
+-                } catch (LDAPException ee) {
+-                    logger.warn("Unable to recreate " + dn + ": " + ee.getMessage());
+-                }
++        } catch (LDAPException e) {
++            logger.error("Unable to delete " + dn + ": " + e.getMessage());
++            throw e;
++        }
+ 
+-            } else {
+-                logger.error("Unable to add " + dn + ": " + e.getMessage(), e);
+-                throw e;
+-            }
++        logger.warn("Adding new replication manager: " + dn);
++
++        try {
++            connection.add(entry);
++
++        } catch (LDAPException e) {
++            logger.error("Unable to add " + dn + ": " + e.getMessage());
++            throw e;
+         }
+     }
+ 
+@@ -799,28 +808,41 @@ public class LDAPConfigurator {
+ 
+         try {
+             connection.add(entry);
++            // replica object added -> done
++            return true;
+ 
+         } catch (LDAPException e) {
+-
+             if (e.getLDAPResultCode() != LDAPException.ENTRY_ALREADY_EXISTS) {
++                logger.error("Unable to add " + replicaDN + ": " + e.getMessage(), e);
+                 throw e;
+             }
++            logger.warn("Replica object already exists: " + replicaDN);
++        }
++
++        logger.info("Adding replica bind DN");
+ 
+-            // BZ 470918: We can't just add the new dn.
+-            // We need to do a replace until the bug is fixed.
+-            logger.warn("Entry already exists, adding bind DN");
++        // BZ 470918: We can't just add the new dn.
++        // We need to do a replace until the bug is fixed.
+ 
+-            entry = connection.read(replicaDN);
+-            LDAPAttribute attr = entry.getAttribute("nsDS5ReplicaBindDN");
+-            attr.addValue(bindDN);
++        entry = connection.read(replicaDN);
++        LDAPAttribute attr = entry.getAttribute("nsDS5ReplicaBindDN");
++        attr.addValue(bindDN);
+ 
+-            LDAPModification mod = new LDAPModification(LDAPModification.REPLACE, attr);
++        LDAPModification mod = new LDAPModification(LDAPModification.REPLACE, attr);
++
++        try {
+             connection.modify(replicaDN, mod);
++            // replica bind DN added -> done
+ 
+-            return false;
++        } catch (LDAPException e) {
++            if (e.getLDAPResultCode() != LDAPException.ATTRIBUTE_OR_VALUE_EXISTS) {
++                logger.error("Unable to add " + bindDN + ": " + e.getMessage(), e);
++                throw e;
++            }
++            logger.warn("Replica bind DN already exists: " + bindDN);
+         }
+ 
+-        return true;
++        return false;
+     }
+ 
+     public void createReplicationAgreement(
+@@ -864,29 +886,33 @@ public class LDAPConfigurator {
+ 
+         try {
+             connection.add(entry);
++            // replication agreement added -> done
++            return;
+ 
+         } catch (LDAPException e) {
+-            if (e.getLDAPResultCode() == LDAPException.ENTRY_ALREADY_EXISTS) {
+-                logger.warn("Entry already exists: " + dn);
+-
+-                try {
+-                    connection.delete(dn);
+-                } catch (LDAPException ee) {
+-                    logger.error("Unable to delete " + dn + ": " + ee.getMessage(), ee);
+-                    throw ee;
+-                }
+-
+-                try {
+-                    connection.add(entry);
+-                } catch (LDAPException ee) {
+-                    logger.error("Unable to add " + dn + ": " + ee.getMessage(), ee);
+-                    throw ee;
+-                }
+-
+-            } else {
++            if (e.getLDAPResultCode() != LDAPException.ENTRY_ALREADY_EXISTS) {
+                 logger.error("Unable to add " + dn + ": " + e.getMessage(), e);
+                 throw e;
+             }
++            logger.warn("Replication agreement already exists: " + dn);
++        }
++
++        logger.warn("Removing existing replication agreement: " + dn);
++
++        try {
++            connection.delete(dn);
++        } catch (LDAPException e) {
++            logger.error("Unable to delete " + dn + ": " + e.getMessage(), e);
++            throw e;
++        }
++
++        logger.warn("Adding new replication agreement: " + dn);
++
++        try {
++            connection.add(entry);
++        } catch (LDAPException e) {
++            logger.error("Unable to add " + dn + ": " + e.getMessage(), e);
++            throw e;
+         }
+     }
+ 
+-- 
+2.31.1
+
diff --git a/SPECS/pki-core.spec b/SPECS/pki-core.spec
index aa5c113..bf8aa96 100644
--- a/SPECS/pki-core.spec
+++ b/SPECS/pki-core.spec
@@ -13,7 +13,7 @@ License:          GPLv2 and LGPLv2
 # For development (i.e. unsupported) releases, use x.y.z-0.n.<phase>.
 # For official (i.e. supported) releases, use x.y.z-r where r >=1.
 Version:          10.11.2
-Release:          2%{?_timestamp}%{?_commit_id}%{?dist}
+Release:          4%{?_timestamp}%{?_commit_id}%{?dist}
 #global           _phase -alpha1
 
 # To create a tarball from a version tag:
@@ -31,6 +31,8 @@ Source: https://github.com/dogtagpki/pki/archive/v%{version}%{?_phase}/pki-%{ver
 #     > pki-VERSION-RELEASE.patch
 # Patch: pki-VERSION-RELEASE.patch
 Patch1: 0001-Fix-Bug-2001576-pki-instance-creation-fails-for-IPA-.patch
+Patch2: 0001-Fix-replica-reinstallation.patch
+Patch3: 0001-Fix-AJP-connector-migration.patch
 
 # md2man isn't available on i686. Additionally, we aren't generally multi-lib
 # compatible (https://fedoraproject.org/wiki/Packaging:Java)
@@ -1363,6 +1365,12 @@ fi
 
 ################################################################################
 %changelog
+* Tue Jan 04 2022 Red Hat PKI Team <rhcs-maint@redhat.com> 10.11.2-4
+- Bug 2029023 - Fix AJP connector migration
+
+* Tue Dec 14 2021 Red Hat PKI Team <rhcs-maint@redhat.com> 10.11.2-3
+- Bug 2024676 - Unable to reinstall PKI clone
+
 * Fri Sep 24 2021 Red Hat PKI Team <rhcs-maint@redhat.com> 10.11.2-2
 - Bug 2001576 - pki instance creation fails for IPA in FIPS mode