diff --git a/SOURCES/php-CVE-2019-10192.patch b/SOURCES/php-CVE-2019-10192.patch
deleted file mode 100644
index 82358e4..0000000
--- a/SOURCES/php-CVE-2019-10192.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-From 9f13b2bd4967334b1701c6eccdf53760cb13f79e Mon Sep 17 00:00:00 2001
-From: John Sully <john@csquare.ca>
-Date: Thu, 14 Mar 2019 14:02:16 -0400
-Subject: [PATCH] Fix hyperloglog corruption
-
----
- src/hyperloglog.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/src/hyperloglog.c b/src/hyperloglog.c
-index fc21ea0065d..e993bf26e1d 100644
---- a/src/hyperloglog.c
-+++ b/src/hyperloglog.c
-@@ -614,6 +614,10 @@ int hllSparseToDense(robj *o) {
-         } else {
-             runlen = HLL_SPARSE_VAL_LEN(p);
-             regval = HLL_SPARSE_VAL_VALUE(p);
-+            if ((runlen + idx) > HLL_REGISTERS) {
-+                sdsfree(dense);
-+                return C_ERR;
-+            }
-             while(runlen--) {
-                 HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
-                 idx++;
-@@ -1088,6 +1092,8 @@ int hllMerge(uint8_t *max, robj *hll) {
-             } else {
-                 runlen = HLL_SPARSE_VAL_LEN(p);
-                 regval = HLL_SPARSE_VAL_VALUE(p);
-+                if ((runlen + i) > HLL_REGISTERS)
-+                    return C_ERR;
-                 while(runlen--) {
-                     if (regval > max[i]) max[i] = regval;
-                     i++;
-From e216ceaf0e099536fe3658a29dcb725d812364e0 Mon Sep 17 00:00:00 2001
-From: antirez <antirez@gmail.com>
-Date: Fri, 15 Mar 2019 17:16:06 +0100
-Subject: [PATCH] HyperLogLog: handle wrong offset in the base case.
-
----
- src/hyperloglog.c | 8 ++------
- 1 file changed, 2 insertions(+), 6 deletions(-)
-
-diff --git a/src/hyperloglog.c b/src/hyperloglog.c
-index 526510b43b9..1e7ce3dceb7 100644
---- a/src/hyperloglog.c
-+++ b/src/hyperloglog.c
-@@ -614,10 +614,7 @@ int hllSparseToDense(robj *o) {
-         } else {
-             runlen = HLL_SPARSE_VAL_LEN(p);
-             regval = HLL_SPARSE_VAL_VALUE(p);
--            if ((runlen + idx) > HLL_REGISTERS) {
--                sdsfree(dense);
--                return C_ERR;
--            }
-+            if ((runlen + idx) > HLL_REGISTERS) break; /* Overflow. */
-             while(runlen--) {
-                 HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
-                 idx++;
-@@ -1097,8 +1094,7 @@ int hllMerge(uint8_t *max, robj *hll) {
-             } else {
-                 runlen = HLL_SPARSE_VAL_LEN(p);
-                 regval = HLL_SPARSE_VAL_VALUE(p);
--                if ((runlen + i) > HLL_REGISTERS)
--                    return C_ERR;
-+                if ((runlen + i) > HLL_REGISTERS) break; /* Overflow. */
-                 while(runlen--) {
-                     if (regval > max[i]) max[i] = regval;
-                     i++;
-From 4208666797b5831eefc022ae46ab5747200cd671 Mon Sep 17 00:00:00 2001
-From: antirez <antirez@gmail.com>
-Date: Fri, 15 Mar 2019 13:52:29 +0100
-Subject: [PATCH] HyperLogLog: dense/sparse repr parsing fuzz test.
-
----
- tests/unit/hyperloglog.tcl | 29 +++++++++++++++++++++++++++++
- 1 file changed, 29 insertions(+)
-
-diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl
-index 7d36b7a351f..6a9c47b11c5 100644
---- a/tests/unit/hyperloglog.tcl
-+++ b/tests/unit/hyperloglog.tcl
-@@ -115,6 +115,35 @@ start_server {tags {"hll"}} {
-         set e
-     } {*WRONGTYPE*}
- 
-+    test {Fuzzing dense/sparse encoding: Redis should always detect errors} {
-+        for {set j 0} {$j < 10000} {incr j} {
-+            r del hll
-+            set items {}
-+            set numitems [randomInt 3000]
-+            for {set i 0} {$i < $numitems} {incr i} {
-+                lappend items [expr {rand()}]
-+            }
-+            r pfadd hll {*}$items
-+
-+            # Corrupt it in some random way.
-+            for {set i 0} {$i < 5} {incr i} {
-+                set len [r strlen hll]
-+                set pos [randomInt $len]
-+                set byte [randstring 1 1 binary]
-+                r setrange hll $pos $byte
-+                # Don't modify more bytes 50% of times
-+                if {rand() < 0.5} break
-+            }
-+
-+            # Use the hyperloglog to check if it crashes
-+            # Redis in some way.
-+            catch {
-+                r pfcount hll
-+                r pfdebug getreg hll
-+            }
-+        }
-+    }
-+
-     test {PFADD, PFCOUNT, PFMERGE type checking works} {
-         r set foo bar
-         catch {r pfadd foo 1} e
diff --git a/SOURCES/php-CVE-2019-10193.patch b/SOURCES/php-CVE-2019-10193.patch
deleted file mode 100644
index 967625c..0000000
--- a/SOURCES/php-CVE-2019-10193.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From a4b90be9fcd5e1668ac941cabce3b1ab38dbe326 Mon Sep 17 00:00:00 2001
-From: antirez <antirez@gmail.com>
-Date: Fri, 15 Mar 2019 17:10:16 +0100
-Subject: [PATCH] HyperLogLog: enlarge reghisto variable for safety.
-
----
- src/hyperloglog.c | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-diff --git a/src/hyperloglog.c b/src/hyperloglog.c
-index e993bf26e1d..526510b43b9 100644
---- a/src/hyperloglog.c
-+++ b/src/hyperloglog.c
-@@ -1017,7 +1017,12 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) {
-     double m = HLL_REGISTERS;
-     double E;
-     int j;
--    int reghisto[HLL_Q+2] = {0};
-+    /* Note that reghisto could be just HLL_Q+1, becuase this is the
-+     * maximum frequency of the "000...1" sequence the hash function is
-+     * able to return. However it is slow to check for sanity of the
-+     * input: instead we history array at a safe size: overflows will
-+     * just write data to wrong, but correctly allocated, places. */
-+    int reghisto[64] = {0};
- 
-     /* Compute register histogram */
-     if (hdr->encoding == HLL_DENSE) {
diff --git a/SOURCES/redis-CVE-2019-10192.patch b/SOURCES/redis-CVE-2019-10192.patch
new file mode 100644
index 0000000..82358e4
--- /dev/null
+++ b/SOURCES/redis-CVE-2019-10192.patch
@@ -0,0 +1,117 @@
+From 9f13b2bd4967334b1701c6eccdf53760cb13f79e Mon Sep 17 00:00:00 2001
+From: John Sully <john@csquare.ca>
+Date: Thu, 14 Mar 2019 14:02:16 -0400
+Subject: [PATCH] Fix hyperloglog corruption
+
+---
+ src/hyperloglog.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/hyperloglog.c b/src/hyperloglog.c
+index fc21ea0065d..e993bf26e1d 100644
+--- a/src/hyperloglog.c
++++ b/src/hyperloglog.c
+@@ -614,6 +614,10 @@ int hllSparseToDense(robj *o) {
+         } else {
+             runlen = HLL_SPARSE_VAL_LEN(p);
+             regval = HLL_SPARSE_VAL_VALUE(p);
++            if ((runlen + idx) > HLL_REGISTERS) {
++                sdsfree(dense);
++                return C_ERR;
++            }
+             while(runlen--) {
+                 HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
+                 idx++;
+@@ -1088,6 +1092,8 @@ int hllMerge(uint8_t *max, robj *hll) {
+             } else {
+                 runlen = HLL_SPARSE_VAL_LEN(p);
+                 regval = HLL_SPARSE_VAL_VALUE(p);
++                if ((runlen + i) > HLL_REGISTERS)
++                    return C_ERR;
+                 while(runlen--) {
+                     if (regval > max[i]) max[i] = regval;
+                     i++;
+From e216ceaf0e099536fe3658a29dcb725d812364e0 Mon Sep 17 00:00:00 2001
+From: antirez <antirez@gmail.com>
+Date: Fri, 15 Mar 2019 17:16:06 +0100
+Subject: [PATCH] HyperLogLog: handle wrong offset in the base case.
+
+---
+ src/hyperloglog.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/src/hyperloglog.c b/src/hyperloglog.c
+index 526510b43b9..1e7ce3dceb7 100644
+--- a/src/hyperloglog.c
++++ b/src/hyperloglog.c
+@@ -614,10 +614,7 @@ int hllSparseToDense(robj *o) {
+         } else {
+             runlen = HLL_SPARSE_VAL_LEN(p);
+             regval = HLL_SPARSE_VAL_VALUE(p);
+-            if ((runlen + idx) > HLL_REGISTERS) {
+-                sdsfree(dense);
+-                return C_ERR;
+-            }
++            if ((runlen + idx) > HLL_REGISTERS) break; /* Overflow. */
+             while(runlen--) {
+                 HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
+                 idx++;
+@@ -1097,8 +1094,7 @@ int hllMerge(uint8_t *max, robj *hll) {
+             } else {
+                 runlen = HLL_SPARSE_VAL_LEN(p);
+                 regval = HLL_SPARSE_VAL_VALUE(p);
+-                if ((runlen + i) > HLL_REGISTERS)
+-                    return C_ERR;
++                if ((runlen + i) > HLL_REGISTERS) break; /* Overflow. */
+                 while(runlen--) {
+                     if (regval > max[i]) max[i] = regval;
+                     i++;
+From 4208666797b5831eefc022ae46ab5747200cd671 Mon Sep 17 00:00:00 2001
+From: antirez <antirez@gmail.com>
+Date: Fri, 15 Mar 2019 13:52:29 +0100
+Subject: [PATCH] HyperLogLog: dense/sparse repr parsing fuzz test.
+
+---
+ tests/unit/hyperloglog.tcl | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl
+index 7d36b7a351f..6a9c47b11c5 100644
+--- a/tests/unit/hyperloglog.tcl
++++ b/tests/unit/hyperloglog.tcl
+@@ -115,6 +115,35 @@ start_server {tags {"hll"}} {
+         set e
+     } {*WRONGTYPE*}
+ 
++    test {Fuzzing dense/sparse encoding: Redis should always detect errors} {
++        for {set j 0} {$j < 10000} {incr j} {
++            r del hll
++            set items {}
++            set numitems [randomInt 3000]
++            for {set i 0} {$i < $numitems} {incr i} {
++                lappend items [expr {rand()}]
++            }
++            r pfadd hll {*}$items
++
++            # Corrupt it in some random way.
++            for {set i 0} {$i < 5} {incr i} {
++                set len [r strlen hll]
++                set pos [randomInt $len]
++                set byte [randstring 1 1 binary]
++                r setrange hll $pos $byte
++                # Don't modify more bytes 50% of times
++                if {rand() < 0.5} break
++            }
++
++            # Use the hyperloglog to check if it crashes
++            # Redis in some way.
++            catch {
++                r pfcount hll
++                r pfdebug getreg hll
++            }
++        }
++    }
++
+     test {PFADD, PFCOUNT, PFMERGE type checking works} {
+         r set foo bar
+         catch {r pfadd foo 1} e
diff --git a/SOURCES/redis-CVE-2019-10193.patch b/SOURCES/redis-CVE-2019-10193.patch
new file mode 100644
index 0000000..967625c
--- /dev/null
+++ b/SOURCES/redis-CVE-2019-10193.patch
@@ -0,0 +1,27 @@
+From a4b90be9fcd5e1668ac941cabce3b1ab38dbe326 Mon Sep 17 00:00:00 2001
+From: antirez <antirez@gmail.com>
+Date: Fri, 15 Mar 2019 17:10:16 +0100
+Subject: [PATCH] HyperLogLog: enlarge reghisto variable for safety.
+
+---
+ src/hyperloglog.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/src/hyperloglog.c b/src/hyperloglog.c
+index e993bf26e1d..526510b43b9 100644
+--- a/src/hyperloglog.c
++++ b/src/hyperloglog.c
+@@ -1017,7 +1017,12 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) {
+     double m = HLL_REGISTERS;
+     double E;
+     int j;
+-    int reghisto[HLL_Q+2] = {0};
++    /* Note that reghisto could be just HLL_Q+1, becuase this is the
++     * maximum frequency of the "000...1" sequence the hash function is
++     * able to return. However it is slow to check for sanity of the
++     * input: instead we history array at a safe size: overflows will
++     * just write data to wrong, but correctly allocated, places. */
++    int reghisto[64] = {0};
+ 
+     /* Compute register histogram */
+     if (hdr->encoding == HLL_DENSE) {
diff --git a/SOURCES/redis-CVE-2021-32626.patch b/SOURCES/redis-CVE-2021-32626.patch
new file mode 100644
index 0000000..6f8063b
--- /dev/null
+++ b/SOURCES/redis-CVE-2021-32626.patch
@@ -0,0 +1,120 @@
+Backported for 5.0.3
+
+
+
+From a4b813d8b844094fcd77c511af596866043b20c8 Mon Sep 17 00:00:00 2001
+From: "meir@redislabs.com" <meir@redislabs.com>
+Date: Sun, 13 Jun 2021 14:27:18 +0300
+Subject: [PATCH] Fix invalid memory write on lua stack overflow
+ {CVE-2021-32626}
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When LUA call our C code, by default, the LUA stack has room for 20
+elements. In most cases, this is more than enough but sometimes it's not
+and the caller must verify the LUA stack size before he pushes elements.
+
+On 3 places in the code, there was no verification of the LUA stack size.
+On specific inputs this missing verification could have lead to invalid
+memory write:
+1. On 'luaReplyToRedisReply', one might return a nested reply that will
+   explode the LUA stack.
+2. On 'redisProtocolToLuaType', the Redis reply might be deep enough
+   to explode the LUA stack (notice that currently there is no such
+   command in Redis that returns such a nested reply, but modules might
+   do it)
+3. On 'ldbRedis', one might give a command with enough arguments to
+   explode the LUA stack (all the arguments will be pushed to the LUA
+   stack)
+
+This commit is solving all those 3 issues by calling 'lua_checkstack' and
+verify that there is enough room in the LUA stack to push elements. In
+case 'lua_checkstack' returns an error (there is not enough room in the
+LUA stack and it's not possible to increase the stack), we will do the
+following:
+1. On 'luaReplyToRedisReply', we will return an error to the user.
+2. On 'redisProtocolToLuaType' we will exit with panic (we assume this
+   scenario is rare because it can only happen with a module).
+3. On 'ldbRedis', we return an error.
+
+(cherry picked from commit d32a3f74f2a343846b50920e95754a955c1a10a9)
+---
+ src/scripting.c | 36 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+
+diff --git a/src/scripting.c b/src/scripting.c
+index db1e4d4b5f1..153b942404e 100644
+--- a/src/scripting.c
++++ b/src/scripting.c
+@@ -125,6 +125,16 @@ void sha1hex(char *digest, char *script, size_t len) {
+  */
+ 
+ char *redisProtocolToLuaType(lua_State *lua, char* reply) {
++
++    if (!lua_checkstack(lua, 5)) {
++        /*
++         * Increase the Lua stack if needed, to make sure there is enough room
++         * to push 5 elements to the stack. On failure, exit with panic.
++         * Notice that we need, in the worst case, 5 elements because redisProtocolToLuaType_Aggregate
++         * might push 5 elements to the Lua stack.*/
++        serverPanic("lua stack limit reach when parsing redis.call reply");
++    }
++
+     char *p = reply;
+ 
+     switch(*p) {
+@@ -275,6 +285,17 @@ void luaSortArray(lua_State *lua) {
+  * ------------------------------------------------------------------------- */
+ 
+ void luaReplyToRedisReply(client *c, lua_State *lua) {
++
++    if (!lua_checkstack(lua, 4)) {
++        /* Increase the Lua stack if needed to make sure there is enough room
++         * to push 4 elements to the stack. On failure, return error.
++         * Notice that we need, in the worst case, 4 elements because returning a map might
++         * require push 4 elements to the Lua stack.*/
++        addReplyErrorFormat(c, "reached lua stack limit");
++        lua_pop(lua,1); // pop the element from the stack
++        return;
++    }
++
+     int t = lua_type(lua,-1);
+ 
+     switch(t) {
+@@ -292,6 +313,9 @@ void luaReplyToRedisReply(client *c, lua_State *lua) {
+          * Error are returned as a single element table with 'err' field.
+          * Status replies are returned as single element table with 'ok'
+          * field. */
++
++        /* Handle error reply. */
++        /* we took care of the stack size on function start */
+         lua_pushstring(lua,"err");
+         lua_gettable(lua,-2);
+         t = lua_type(lua,-1);
+@@ -320,6 +344,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) {
+ 
+             lua_pop(lua,1); /* Discard the 'ok' field value we popped */
+             while(1) {
++                /* we took care of the stack size on function start */
+                 lua_pushnumber(lua,j++);
+                 lua_gettable(lua,-2);
+                 t = lua_type(lua,-1);
+@@ -2231,6 +2256,17 @@ void ldbEval(lua_State *lua, sds *argv, int argc) {
+ void ldbRedis(lua_State *lua, sds *argv, int argc) {
+     int j, saved_rc = server.lua_replicate_commands;
+ 
++    if (!lua_checkstack(lua, argc + 1)) {
++        /* Increase the Lua stack if needed to make sure there is enough room
++         * to push 'argc + 1' elements to the stack. On failure, return error.
++         * Notice that we need, in worst case, 'argc + 1' elements because we push all the arguments
++         * given by the user (without the first argument) and we also push the 'redis' global table and
++         * 'redis.call' function so:
++         * (1 (redis table)) + (1 (redis.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/
++        ldbLogRedisReply("max lua stack reached");
++        return;
++    }
++
+     lua_getglobal(lua,"redis");
+     lua_pushstring(lua,"call");
+     lua_gettable(lua,-2);       /* Stack: redis, redis.call */
diff --git a/SOURCES/redis-CVE-2021-32627.patch b/SOURCES/redis-CVE-2021-32627.patch
new file mode 100644
index 0000000..4e86892
--- /dev/null
+++ b/SOURCES/redis-CVE-2021-32627.patch
@@ -0,0 +1,775 @@
+Backported for 5.0.3
+
+
+
+From 6facfb7a103b26b9a602253a738b2130afb7c5d3 Mon Sep 17 00:00:00 2001
+From: Oran Agra <oran@redislabs.com>
+Date: Thu, 3 Jun 2021 12:10:02 +0300
+Subject: [PATCH] Fix ziplist and listpack overflows and truncations
+ (CVE-2021-32627, CVE-2021-32628)
+
+- fix possible heap corruption in ziplist and listpack resulting by trying to
+  allocate more than the maximum size of 4GB.
+- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
+  converted to HT encoding, that's not a useful size.
+- prevent listpack (stream) from reaching size of above 1GB.
+- XADD will start a new listpack if the new record may cause the previous
+  listpack to grow over 1GB.
+- XADD will respond with an error if a single stream record is over 1GB
+- List type (ziplist in quicklist) was truncating strings that were over 4GB,
+  now it'll respond with an error.
+
+(cherry picked from commit 68e221a3f98a427805d31c1760b4cdf37ba810ab)
+---
+ src/geo.c                 |   5 +-
+ src/listpack.c            |   2 +-
+ src/quicklist.c           |  17 ++++-
+ src/rdb.c                 |  36 ++++++---
+ src/server.h              |   2 +-
+ src/t_hash.c              |  13 +++-
+ src/t_list.c              |  25 ++++++
+ src/t_stream.c            |  48 +++++++++---
+ src/t_zset.c              |  43 +++++++----
+ src/ziplist.c             |  17 ++++-
+ src/ziplist.h             |   1 +
+ tests/support/util.tcl    |  21 +++++
+ tests/unit/violations.tcl | 156 ++++++++++++++++++++++++++++++++++++++
+ 13 files changed, 338 insertions(+), 48 deletions(-)
+ create mode 100644 tests/unit/violations.tcl
+
+diff --git a/src/geo.c b/src/geo.c
+index f1d3f18d46e7..b94fcc1b3d70 100644
+--- a/src/geo.c
++++ b/src/geo.c
+@@ -635,7 +635,7 @@ void georadiusGeneric(client *c, int fla
+         robj *zobj;
+         zset *zs;
+         int i;
+-        size_t maxelelen = 0;
++        size_t maxelelen = 0, totelelen = 0;
+ 
+         if (returned_items) {
+             zobj = createZsetObject();
+@@ -650,13 +650,14 @@ void georadiusGeneric(client *c, int fla
+             size_t elelen = sdslen(gp->member);
+ 
+             if (maxelelen < elelen) maxelelen = elelen;
++            totelelen += elelen;
+             znode = zslInsert(zs->zsl,score,gp->member);
+             serverAssert(dictAdd(zs->dict,gp->member,&znode->score) == DICT_OK);
+             gp->member = NULL;
+         }
+ 
+         if (returned_items) {
+-            zsetConvertToZiplistIfNeeded(zobj,maxelelen);
++            zsetConvertToZiplistIfNeeded(zobj,maxelelen,totelelen);
+             setKey(c->db,storekey,zobj);
+             decrRefCount(zobj);
+             notifyKeyspaceEvent(NOTIFY_LIST,"georadiusstore",storekey,
+diff --git a/src/listpack.c b/src/listpack.c
+index e1f4d9a02ee8..cd5583ccb258 100644
+--- a/src/listpack.c
++++ b/src/listpack.c
+@@ -283,7 +283,7 @@ int lpEncodeGetType(unsigned char *ele, uint32_t size, unsigned char *intenc, ui
+     } else {
+         if (size < 64) *enclen = 1+size;
+         else if (size < 4096) *enclen = 2+size;
+-        else *enclen = 5+size;
++        else *enclen = 5+(uint64_t)size;
+         return LP_ENCODING_STRING;
+     }
+ }
+diff --git a/src/quicklist.c b/src/quicklist.c
+index 7b5484116785..d5cc758b2fa0 100644
+--- a/src/quicklist.c
++++ b/src/quicklist.c
+@@ -29,6 +29,7 @@
+  */
+ 
+ #include <string.h> /* for memcpy */
++#include "redisassert.h"
+ #include "quicklist.h"
+ #include "zmalloc.h"
+ #include "ziplist.h"
+@@ -43,11 +44,16 @@
+ #define REDIS_STATIC static
+ #endif
+ 
+-/* Optimization levels for size-based filling */
++/* Optimization levels for size-based filling.
++ * Note that the largest possible limit is 16k, so even if each record takes
++ * just one byte, it still won't overflow the 16 bit count field. */
+ static const size_t optimization_level[] = {4096, 8192, 16384, 32768, 65536};
+ 
+ /* Maximum size in bytes of any multi-element ziplist.
+- * Larger values will live in their own isolated ziplists. */
++ * Larger values will live in their own isolated ziplists.
++ * This is used only if we're limited by record count. when we're limited by
++ * size, the maximum limit is bigger, but still safe.
++ * 8k is a recommended / default size limit */
+ #define SIZE_SAFETY_LIMIT 8192
+ 
+ /* Minimum ziplist size in bytes for attempting compression. */
+@@ -441,6 +447,8 @@ REDIS_STATIC int _quicklistNodeAllowInsert(const quicklistNode *node,
+     unsigned int new_sz = node->sz + sz + ziplist_overhead;
+     if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(new_sz, fill)))
+         return 1;
++    /* when we return 1 above we know that the limit is a size limit (which is
++     * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */
+     else if (!sizeMeetsSafetyLimit(new_sz))
+         return 0;
+     else if ((int)node->count < fill)
+@@ -460,6 +468,8 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a,
+     unsigned int merge_sz = a->sz + b->sz - 11;
+     if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(merge_sz, fill)))
+         return 1;
++    /* when we return 1 above we know that the limit is a size limit (which is
++     * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */
+     else if (!sizeMeetsSafetyLimit(merge_sz))
+         return 0;
+     else if ((int)(a->count + b->count) <= fill)
+@@ -479,6 +489,7 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a,
+  * Returns 1 if new head created. */
+ int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) {
+     quicklistNode *orig_head = quicklist->head;
++    assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
+     if (likely(
+             _quicklistNodeAllowInsert(quicklist->head, quicklist->fill, sz))) {
+         quicklist->head->zl =
+@@ -502,6 +513,7 @@ int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) {
+  * Returns 1 if new tail created. */
+ int quicklistPushTail(quicklist *quicklist, void *value, size_t sz) {
+     quicklistNode *orig_tail = quicklist->tail;
++    assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
+     if (likely(
+             _quicklistNodeAllowInsert(quicklist->tail, quicklist->fill, sz))) {
+         quicklist->tail->zl =
+@@ -835,6 +847,7 @@ REDIS_STATIC void _quicklistInsert(quicklist *quicklist, quicklistEntry *entry,
+     int fill = quicklist->fill;
+     quicklistNode *node = entry->node;
+     quicklistNode *new_node = NULL;
++    assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
+ 
+     if (!node) {
+         /* we have no reference node, so let's create only node in the list */
+diff --git a/src/rdb.c b/src/rdb.c
+index 3c58a1eaf7fb..c7dc724f3df6 100644
+--- a/src/rdb.c
++++ b/src/rdb.c
+@@ -1452,7 +1452,7 @@ robj *rdbLoadObject(int rdbtype, rio *rd
+     } else if (rdbtype == RDB_TYPE_ZSET_2 || rdbtype == RDB_TYPE_ZSET) {
+         /* Read list/set value. */
+         uint64_t zsetlen;
+-        size_t maxelelen = 0;
++        size_t maxelelen = 0, totelelen = 0;
+         zset *zs;
+ 
+         if ((zsetlen = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
+@@ -1479,6 +1479,7 @@ robj *rdbLoadObject(int rdbtype, rio *rd
+ 
+             /* Don't care about integer-encoded strings. */
+             if (sdslen(sdsele) > maxelelen) maxelelen = sdslen(sdsele);
++            totelelen += sdslen(sdsele);
+ 
+             znode = zslInsert(zs->zsl,score,sdsele);
+             dictAdd(zs->dict,sdsele,&znode->score);
+@@ -1486,8 +1487,11 @@ robj *rdbLoadObject(int rdbtype, rio *rd
+ 
+         /* Convert *after* loading, since sorted sets are not stored ordered. */
+         if (zsetLength(o) <= server.zset_max_ziplist_entries &&
+-            maxelelen <= server.zset_max_ziplist_value)
+-                zsetConvert(o,OBJ_ENCODING_ZIPLIST);
++            maxelelen <= server.zset_max_ziplist_value &&
++            ziplistSafeToAdd(NULL, totelelen))
++        {
++            zsetConvert(o,OBJ_ENCODING_ZIPLIST);
++        }
+     } else if (rdbtype == RDB_TYPE_HASH) {
+         uint64_t len;
+         int ret;
+@@ -1511,21 +1515,25 @@ robj *rdbLoadObject(int rdbtype, rio *rd
+             if ((value = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL))
+                 == NULL) return NULL;
+ 
+-            /* Add pair to ziplist */
+-            o->ptr = ziplistPush(o->ptr, (unsigned char*)field,
+-                    sdslen(field), ZIPLIST_TAIL);
+-            o->ptr = ziplistPush(o->ptr, (unsigned char*)value,
+-                    sdslen(value), ZIPLIST_TAIL);
+-
+             /* Convert to hash table if size threshold is exceeded */
+             if (sdslen(field) > server.hash_max_ziplist_value ||
+-                sdslen(value) > server.hash_max_ziplist_value)
++                sdslen(value) > server.hash_max_ziplist_value ||
++                !ziplistSafeToAdd(o->ptr, sdslen(field)+sdslen(value)))
+             {
+-                sdsfree(field);
+-                sdsfree(value);
+                 hashTypeConvert(o, OBJ_ENCODING_HT);
++                ret = dictAdd((dict*)o->ptr, field, value);
++                if (ret == DICT_ERR) {
++                    rdbExitReportCorruptRDB("Duplicate hash fields detected");
++                }
+                 break;
+             }
++
++            /* Add pair to ziplist */
++            o->ptr = ziplistPush(o->ptr, (unsigned char*)field,
++                    sdslen(field), ZIPLIST_TAIL);
++            o->ptr = ziplistPush(o->ptr, (unsigned char*)value,
++                    sdslen(value), ZIPLIST_TAIL);
++
+             sdsfree(field);
+             sdsfree(value);
+         }
+@@ -1594,6 +1602,10 @@ robj *rdbLoadObject(int rdbtype, rio *rd
+                     while ((zi = zipmapNext(zi, &fstr, &flen, &vstr, &vlen)) != NULL) {
+                         if (flen > maxlen) maxlen = flen;
+                         if (vlen > maxlen) maxlen = vlen;
++                        if (!ziplistSafeToAdd(zl, (size_t)flen + vlen)) {
++                            rdbExitReportCorruptRDB("Hash zipmap too big (%u)", flen);
++                        }
++
+                         zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL);
+                         zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL);
+                     }
+diff --git a/src/server.h b/src/server.h
+index ca868939cf6d..164a82271f44 100644
+--- a/src/server.h
++++ b/src/server.h
+@@ -1677,7 +1677,7 @@ unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range);
+ unsigned char *zzlLastInRange(unsigned char *zl, zrangespec *range);
+ unsigned long zsetLength(const robj *zobj);
+ void zsetConvert(robj *zobj, int encoding);
+-void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen);
++void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen);
+ int zsetScore(robj *zobj, sds member, double *score);
+ unsigned long zslGetRank(zskiplist *zsl, double score, sds o);
+ int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore);
+diff --git a/src/t_hash.c b/src/t_hash.c
+index 0ca152df78cc..109522c1322f 100644
+--- a/src/t_hash.c
++++ b/src/t_hash.c
+@@ -39,17 +39,22 @@
+  * as their string length can be queried in constant time. */
+ void hashTypeTryConversion(robj *o, robj **argv, int start, int end) {
+     int i;
++    size_t sum = 0;
+ 
+     if (o->encoding != OBJ_ENCODING_ZIPLIST) return;
+ 
+     for (i = start; i <= end; i++) {
+-        if (sdsEncodedObject(argv[i]) &&
+-            sdslen(argv[i]->ptr) > server.hash_max_ziplist_value)
+-        {
++        if (!sdsEncodedObject(argv[i]))
++            continue;
++        size_t len = sdslen(argv[i]->ptr);
++        if (len > server.hash_max_ziplist_value) {
+             hashTypeConvert(o, OBJ_ENCODING_HT);
+-            break;
++            return;
+         }
++        sum += len;
+     }
++    if (!ziplistSafeToAdd(o->ptr, sum))
++        hashTypeConvert(o, OBJ_ENCODING_HT);
+ }
+ 
+ /* Get the value from a ziplist encoded hash, identified by field.
+diff --git a/src/t_list.c b/src/t_list.c
+index de417f4705f4..67541554f616 100644
+--- a/src/t_list.c
++++ b/src/t_list.c
+@@ -29,6 +29,8 @@
+ 
+ #include "server.h"
+ 
++#define LIST_MAX_ITEM_SIZE ((1ull<<32)-1024)
++
+ /*-----------------------------------------------------------------------------
+  * List API
+  *----------------------------------------------------------------------------*/
+@@ -196,6 +198,14 @@ void listTypeConvert(robj *subject, int enc) {
+ 
+ void pushGenericCommand(client *c, int where) {
+     int j, pushed = 0;
++
++    for (j = 2; j < c->argc; j++) {
++        if (sdslen(c->argv[j]->ptr) > LIST_MAX_ITEM_SIZE) {
++            addReplyError(c, "Element too large");
++            return;
++        }
++    }
++
+     robj *lobj = lookupKeyWrite(c->db,c->argv[1]);
+ 
+     if (lobj && lobj->type != OBJ_LIST) {
+@@ -277,6 +287,11 @@ void linsertCommand(client *c) {
+         return;
+     }
+ 
++    if (sdslen(c->argv[4]->ptr) > LIST_MAX_ITEM_SIZE) {
++        addReplyError(c, "Element too large");
++        return;
++    }
++
+     if ((subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL ||
+         checkType(c,subject,OBJ_LIST)) return;
+ 
+@@ -344,6 +359,11 @@ void lsetCommand(client *c) {
+     long index;
+     robj *value = c->argv[3];
+ 
++    if (sdslen(value->ptr) > LIST_MAX_ITEM_SIZE) {
++        addReplyError(c, "Element too large");
++        return;
++    }
++
+     if ((getLongFromObjectOrReply(c, c->argv[2], &index, NULL) != C_OK))
+         return;
+ 
+@@ -493,6 +513,11 @@ void lremCommand(client *c) {
+     long toremove;
+     long removed = 0;
+ 
++    if (sdslen(obj->ptr) > LIST_MAX_ITEM_SIZE) {
++        addReplyError(c, "Element too large");
++        return;
++    }
++
+     if ((getLongFromObjectOrReply(c, c->argv[2], &toremove, NULL) != C_OK))
+         return;
+ 
+diff --git a/src/t_stream.c b/src/t_stream.c
+index d7754985dd03..e7263d68a28f 100644
+--- a/src/t_stream.c
++++ b/src/t_stream.c
+@@ -40,6 +40,12 @@
+ #define STREAM_ITEM_FLAG_DELETED (1<<0)     /* Entry is delted. Skip it. */
+ #define STREAM_ITEM_FLAG_SAMEFIELDS (1<<1)  /* Same fields as master entry. */
+ 
++/* Don't let listpacks grow too big, even if the user config allows it.
++ * doing so can lead to an overflow (trying to store more than 32bit length
++ * into the listpack header), or actually an assertion since lpInsert
++ * will return NULL. */
++#define STREAM_LISTPACK_MAX_SIZE (1<<30)
++
+ void streamFreeCG(streamCG *cg);
+ void streamFreeNACK(streamNACK *na);
+ size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer);
+@@ -170,12 +176,31 @@ int streamCompareID(streamID *a, streamI
+  *
+  * The function returns C_OK if the item was added, this is always true
+  * if the ID was generated by the function. However the function may return
+- * C_ERR if an ID was given via 'use_id', but adding it failed since the
+- * current top ID is greater or equal. */
++ * C_ERR in several cases:
++ * 1. If an ID was given via 'use_id', but adding it failed since the
++ *    current top ID is greater or equal. errno will be set to EDOM.
++ * 2. If a size of a single element or the sum of the elements is too big to
++ *    be stored into the stream. errno will be set to ERANGE. */
+ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_id, streamID *use_id) {
+     /* If an ID was given, check that it's greater than the last entry ID
+      * or return an error. */
+-    if (use_id && streamCompareID(use_id,&s->last_id) <= 0) return C_ERR;
++    if (use_id && streamCompareID(use_id,&s->last_id) <= 0) {
++        errno = EDOM;
++        return C_ERR;
++    }
++
++    /* Avoid overflow when trying to add an element to the stream (listpack
++     * can only host up to 32bit length strings, and also a total listpack size
++     * can't be bigger than 32bit length). */
++    size_t totelelen = 0;
++    for (int64_t i = 0; i < numfields*2; i++) {
++        sds ele = argv[i]->ptr;
++        totelelen += sdslen(ele);
++    }
++    if (totelelen > STREAM_LISTPACK_MAX_SIZE) {
++        errno = ERANGE;
++        return C_ERR;
++    }
+ 
+     /* Add the new entry. */
+     raxIterator ri;
+@@ -241,9 +266,10 @@ int streamAppendItem(stream *s, robj **a
+      * if we need to switch to the next one. 'lp' will be set to NULL if
+      * the current node is full. */
+     if (lp != NULL) {
+-        if (server.stream_node_max_bytes &&
+-            lp_bytes > server.stream_node_max_bytes)
+-        {
++        size_t node_max_bytes = server.stream_node_max_bytes;
++        if (node_max_bytes == 0 || node_max_bytes > STREAM_LISTPACK_MAX_SIZE)
++            node_max_bytes = STREAM_LISTPACK_MAX_SIZE;
++        if (lp_bytes + totelelen >= node_max_bytes) {
+             lp = NULL;
+         } else if (server.stream_node_max_entries) {
+             int64_t count = lpGetInteger(lpFirst(lp));
+@@ -1224,11 +1250,13 @@ void xaddCommand(client *c) {
+ 
+     /* Append using the low level function and return the ID. */
+     if (streamAppendItem(s,c->argv+field_pos,(c->argc-field_pos)/2,
+-        &id, id_given ? &id : NULL)
+-        == C_ERR)
++        &id, id_given ? &id : NULL) == C_ERR)
+     {
+-        addReplyError(c,"The ID specified in XADD is equal or smaller than the "
+-                        "target stream top item");
++        if (errno == EDOM)
++            addReplyError(c,"The ID specified in XADD is equal or smaller than "
++                            "the target stream top item");
++        else
++            addReplyError(c,"Elements are too large to be stored");
+         return;
+     }
+     addReplyStreamID(c,&id);
+diff --git a/src/t_zset.c b/src/t_zset.c
+index 56ea39607b52..989d5855e1ea 100644
+--- a/src/t_zset.c
++++ b/src/t_zset.c
+@@ -1237,15 +1237,18 @@ void zsetConvert(robj *zobj, int encodin
+ }
+ 
+ /* Convert the sorted set object into a ziplist if it is not already a ziplist
+- * and if the number of elements and the maximum element size is within the
+- * expected ranges. */
+-void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen) {
++ * and if the number of elements and the maximum element size and total elements size
++ * are within the expected ranges. */
++void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen) {
+     if (zobj->encoding == OBJ_ENCODING_ZIPLIST) return;
+     zset *zset = zobj->ptr;
+ 
+     if (zset->zsl->length <= server.zset_max_ziplist_entries &&
+-        maxelelen <= server.zset_max_ziplist_value)
+-            zsetConvert(zobj,OBJ_ENCODING_ZIPLIST);
++        maxelelen <= server.zset_max_ziplist_value &&
++        ziplistSafeToAdd(NULL, totelelen))
++    {
++        zsetConvert(zobj,OBJ_ENCODING_ZIPLIST);
++    }
+ }
+ 
+ /* Return (by reference) the score of the specified member of the sorted set
+@@ -1354,21 +1357,28 @@ int zsetAdd(robj *zobj, double score, sd
+             }
+             return 1;
+         } else if (!xx) {
+-            /* Optimize: check if the element is too large or the list
++            /* check if the element is too large or the list
+              * becomes too long *before* executing zzlInsert. */
+-            zobj->ptr = zzlInsert(zobj->ptr,ele,score);
+-            if (zzlLength(zobj->ptr) > server.zset_max_ziplist_entries)
+-                zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
+-            if (sdslen(ele) > server.zset_max_ziplist_value)
++            if (zzlLength(zobj->ptr)+1 > server.zset_max_ziplist_entries ||
++                sdslen(ele) > server.zset_max_ziplist_value ||
++                !ziplistSafeToAdd(zobj->ptr, sdslen(ele)))
++            {
+                 zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
+-            if (newscore) *newscore = score;
+-            *flags |= ZADD_ADDED;
+-            return 1;
++            } else {
++                zobj->ptr = zzlInsert(zobj->ptr,ele,score);
++                if (newscore) *newscore = score;
++                *flags |= ZADD_ADDED;
++                return 1;
++            }
+         } else {
+             *flags |= ZADD_NOP;
+             return 1;
+         }
+-    } else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
++    }
++
++    /* Note that the above block handling ziplist would have either returned or
++     * converted the key to skiplist. */
++    if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
+         zset *zs = zobj->ptr;
+         zskiplistNode *znode;
+         dictEntry *de;
+@@ -2180,7 +2190,7 @@ void zunionInterGenericCommand(client *c
+     zsetopsrc *src;
+     zsetopval zval;
+     sds tmp;
+-    size_t maxelelen = 0;
++    size_t maxelelen = 0, totelelen = 0;
+     robj *dstobj;
+     zset *dstzset;
+     zskiplistNode *znode;
+@@ -2304,6 +2314,7 @@ void zunionInterGenericCommand(client *c
+                     tmp = zuiNewSdsFromValue(&zval);
+                     znode = zslInsert(dstzset->zsl,score,tmp);
+                     dictAdd(dstzset->dict,tmp,&znode->score);
++                    totelelen += sdslen(tmp);
+                     if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp);
+                 }
+             }
+@@ -2340,6 +2351,7 @@ void zunionInterGenericCommand(client *c
+                     /* Remember the longest single element encountered,
+                      * to understand if it's possible to convert to ziplist
+                      * at the end. */
++                     totelelen += sdslen(tmp);
+                      if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp);
+                     /* Update the element with its initial score. */
+                     dictSetKey(accumulator, de, tmp);
+@@ -2380,7 +2392,7 @@ void zunionInterGenericCommand(client *c
+     if (dbDelete(c->db,dstkey))
+         touched = 1;
+     if (dstzset->zsl->length) {
+-        zsetConvertToZiplistIfNeeded(dstobj,maxelelen);
++        zsetConvertToZiplistIfNeeded(dstobj,maxelelen,totelelen);
+         dbAdd(c->db,dstkey,dstobj);
+         addReplyLongLong(c,zsetLength(dstobj));
+         signalModifiedKey(c->db,dstkey);
+diff --git a/src/ziplist.c b/src/ziplist.c
+index dbd804b11dfc..1a8566698972 100644
+--- a/src/ziplist.c
++++ b/src/ziplist.c
+@@ -265,6 +265,17 @@
+         ZIPLIST_LENGTH(zl) = intrev16ifbe(intrev16ifbe(ZIPLIST_LENGTH(zl))+incr); \
+ }
+ 
++/* Don't let ziplists grow over 1GB in any case, don't wanna risk overflow in
++ * zlbytes*/
++#define ZIPLIST_MAX_SAFETY_SIZE (1<<30)
++int ziplistSafeToAdd(unsigned char* zl, size_t add) {
++    size_t len = zl? ziplistBlobLen(zl): 0;
++    if (len + add > ZIPLIST_MAX_SAFETY_SIZE)
++        return 0;
++    return 1;
++}
++
++
+ /* We use this function to receive information about a ziplist entry.
+  * Note that this is not how the data is actually encoded, is just what we
+  * get filled by a function in order to operate more easily. */
+@@ -586,7 +597,8 @@ unsigned char *ziplistNew(void) {
+ }
+ 
+ /* Resize the ziplist. */
+-unsigned char *ziplistResize(unsigned char *zl, unsigned int len) {
++unsigned char *ziplistResize(unsigned char *zl, size_t len) {
++    assert(len < UINT32_MAX);
+     zl = zrealloc(zl,len);
+     ZIPLIST_BYTES(zl) = intrev32ifbe(len);
+     zl[len-1] = ZIP_END;
+@@ -898,6 +910,9 @@ unsigned char *ziplistMerge(unsigned char **first, unsigned char **second) {
+     /* Combined zl length should be limited within UINT16_MAX */
+     zllength = zllength < UINT16_MAX ? zllength : UINT16_MAX;
+ 
++    /* larger values can't be stored into ZIPLIST_BYTES */
++    assert(zlbytes < UINT32_MAX);
++
+     /* Save offset positions before we start ripping memory apart. */
+     size_t first_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*first));
+     size_t second_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*second));
+diff --git a/src/ziplist.h b/src/ziplist.h
+index 964a47f6dc29..f6ba6c8be47d 100644
+--- a/src/ziplist.h
++++ b/src/ziplist.h
+@@ -49,6 +49,7 @@ unsigned char *ziplistFind(unsigned char *p, unsigned char *vstr, unsigned int v
+ unsigned int ziplistLen(unsigned char *zl);
+ size_t ziplistBlobLen(unsigned char *zl);
+ void ziplistRepr(unsigned char *zl);
++int ziplistSafeToAdd(unsigned char* zl, size_t add);
+ 
+ #ifdef REDIS_TEST
+ int ziplistTest(int argc, char *argv[]);
+diff --git a/tests/support/util.tcl b/tests/support/util.tcl
+index 74f491e483a5..46b56cc2822a 100644
+--- a/tests/support/util.tcl
++++ b/tests/support/util.tcl
+@@ -99,6 +99,27 @@ proc wait_for_ofs_sync {r1 r2} {
+     }
+ }
+ 
++# count current log lines in server's stdout
++proc count_log_lines {srv_idx} {
++    set _ [string trim [exec wc -l < [srv $srv_idx stdout]]]
++}
++
++# returns the number of times a line with that pattern appears in a file
++proc count_message_lines {file pattern} {
++    set res 0
++    # exec fails when grep exits with status other than 0 (when the pattern wasn't found)
++    catch {
++        set res [string trim [exec grep $pattern $file 2> /dev/null | wc -l]]
++    }
++    return $res
++}
++
++# returns the number of times a line with that pattern appears in the log
++proc count_log_message {srv_idx pattern} {
++    set stdout [srv $srv_idx stdout]
++    return [count_message_lines $stdout $pattern]
++}
++
+ # Random integer between 0 and max (excluded).
+ proc randomInt {max} {
+     expr {int(rand()*$max)}
+diff --git a/tests/unit/violations.tcl b/tests/unit/violations.tcl
+new file mode 100644
+index 000000000000..d87b9236528e
+--- /dev/null
++++ b/tests/unit/violations.tcl
+@@ -0,0 +1,156 @@
++# These tests consume massive amounts of memory, and are not
++# suitable to be executed as part of the normal test suite
++set ::str500 [string repeat x 500000000] ;# 500mb
++
++# Utility function to write big argument into redis client connection
++proc write_big_bulk {size} {
++    r write "\$$size\r\n"
++    while {$size >= 500000000} {
++        r write $::str500
++        incr size -500000000
++    }
++    if {$size > 0} {
++        r write [string repeat x $size]
++    }
++    r write "\r\n"
++}
++
++# One XADD with one huge 5GB field
++# Expected to fail resulting in an empty stream
++start_server [list overrides [list save ""] ] {
++    test {XADD one huge field} {
++        r config set proto-max-bulk-len 10000000000 ;#10gb
++        r config set client-query-buffer-limit 10000000000 ;#10gb
++        r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
++        r write "\$1\r\nA\r\n"
++        write_big_bulk 5000000000 ;#5gb
++        r flush
++        catch {r read} err
++        assert_match {*too large*} $err
++        r xlen S1
++    } {0}
++}
++
++# One XADD with one huge (exactly nearly) 4GB field
++# This uncovers the overflow in lpEncodeGetType
++# Expected to fail resulting in an empty stream
++start_server [list overrides [list save ""] ] {
++    test {XADD one huge field - 1} {
++        r config set proto-max-bulk-len 10000000000 ;#10gb
++        r config set client-query-buffer-limit 10000000000 ;#10gb
++        r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
++        r write "\$1\r\nA\r\n"
++        write_big_bulk 4294967295 ;#4gb-1
++        r flush
++        catch {r read} err
++        assert_match {*too large*} $err
++        r xlen S1
++    } {0}
++}
++
++# Gradually add big stream fields using repeated XADD calls
++start_server [list overrides [list save ""] ] {
++    test {several XADD big fields} {
++        r config set stream-node-max-bytes 0
++        for {set j 0} {$j<10} {incr j} {
++            r xadd stream * 1 $::str500 2 $::str500
++        }
++        r ping
++        r xlen stream
++    } {10}
++}
++
++# Add over 4GB to a single stream listpack (one XADD command)
++# Expected to fail resulting in an empty stream
++start_server [list overrides [list save ""] ] {
++    test {single XADD big fields} {
++        r write "*23\r\n\$4\r\nXADD\r\n\$1\r\nS\r\n\$1\r\n*\r\n"
++        for {set j 0} {$j<10} {incr j} {
++            r write "\$1\r\n$j\r\n"
++            write_big_bulk 500000000 ;#500mb
++        }
++        r flush
++        catch {r read} err
++        assert_match {*too large*} $err
++        r xlen S
++    } {0}
++}
++
++# Gradually add big hash fields using repeated HSET calls
++# This reproduces the overflow in the call to ziplistResize
++# Object will be converted to hashtable encoding
++start_server [list overrides [list save ""] ] {
++    r config set hash-max-ziplist-value 1000000000 ;#1gb
++    test {hash with many big fields} {
++        for {set j 0} {$j<10} {incr j} {
++            r hset h $j $::str500
++        }
++        r object encoding h
++    } {hashtable}
++}
++
++# Add over 4GB to a single hash field (one HSET command)
++# Object will be converted to hashtable encoding
++start_server [list overrides [list save ""] ] {
++    test {hash with one huge field} {
++        catch {r config set hash-max-ziplist-value 10000000000} ;#10gb
++        r config set proto-max-bulk-len 10000000000 ;#10gb
++        r config set client-query-buffer-limit 10000000000 ;#10gb
++        r write "*4\r\n\$4\r\nHSET\r\n\$2\r\nH1\r\n"
++        r write "\$1\r\nA\r\n"
++        write_big_bulk 5000000000 ;#5gb
++        r flush
++        r read
++        r object encoding H1
++    } {hashtable}
++}
++
++# Add over 4GB to a single list member (one LPUSH command)
++# Currently unsupported, and expected to fail rather than being truncated
++# Expected to fail resulting in a non-existing list
++start_server [list overrides [list save ""] ] {
++    test {list with one huge field} {
++        r config set proto-max-bulk-len 10000000000 ;#10gb
++        r config set client-query-buffer-limit 10000000000 ;#10gb
++        r write "*3\r\n\$5\r\nLPUSH\r\n\$2\r\nL1\r\n"
++        write_big_bulk 5000000000 ;#5gb
++        r flush
++        catch {r read} err
++        assert_match {*too large*} $err
++        r exists L1
++    } {0}
++}
++
++# SORT which attempts to store an element larger than 4GB into a list.
++# Currently unsupported and results in an assertion instead of truncation
++start_server [list overrides [list save ""] ] {
++    test {SORT adds huge field to list} {
++        r config set proto-max-bulk-len 10000000000 ;#10gb
++        r config set client-query-buffer-limit 10000000000 ;#10gb
++        r write "*3\r\n\$3\r\nSET\r\n\$2\r\nS1\r\n"
++        write_big_bulk 5000000000 ;#5gb
++        r flush
++        r read
++        assert_equal [r strlen S1] 5000000000
++        r set S2 asdf
++        r sadd myset 1 2
++        r mset D1 1 D2 2
++        catch {r sort myset by D* get S* store mylist}
++        # assert_equal [count_log_message 0 "crashed by signal"] 0   - not suitable for 6.0
++        assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
++    }
++}
++
++# SORT which stores an integer encoded element into a list.
++# Just for coverage, no news here.
++start_server [list overrides [list save ""] ] {
++    test {SORT adds integer field to list} {
++        r set S1 asdf
++        r set S2 123 ;# integer encoded
++        assert_encoding "int" S2
++        r sadd myset 1 2
++        r mset D1 1 D2 2
++        r sort myset by D* get S* store mylist
++        r llen mylist
++    } {2}
++}
diff --git a/SOURCES/redis-CVE-2021-32675.patch b/SOURCES/redis-CVE-2021-32675.patch
new file mode 100644
index 0000000..6e1e0f9
--- /dev/null
+++ b/SOURCES/redis-CVE-2021-32675.patch
@@ -0,0 +1,69 @@
+From 71be97294abf3657710a044157ebbc8a21489da3 Mon Sep 17 00:00:00 2001
+From: Oran Agra <oran@redislabs.com>
+Date: Wed, 9 Jun 2021 17:31:39 +0300
+Subject: [PATCH] Prevent unauthenticated client from easily consuming lots of
+ memory (CVE-2021-32675)
+
+This change sets a low limit for multibulk and bulk length in the
+protocol for unauthenticated connections, so that they can't easily
+cause redis to allocate massive amounts of memory by sending just a few
+characters on the network.
+The new limits are 10 arguments of 16kb each (instead of 1m of 512mb)
+
+(cherry picked from commit 3d221e81f3b680543e34942579af190b049ff283)
+---
+ src/networking.c    |  8 ++++++++
+ tests/unit/auth.tcl | 16 ++++++++++++++++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/src/networking.c b/src/networking.c
+index bfaded9b4d0..2b8588094d2 100644
+--- a/src/networking.c
++++ b/src/networking.c
+@@ -1309,6 +1309,10 @@ int processMultibulkBuffer(client *c) {
+             addReplyError(c,"Protocol error: invalid multibulk length");
+             setProtocolError("invalid mbulk count",c);
+             return C_ERR;
++        } else if (ll > 10 && server.requirepass && !c->authenticated) {
++            addReplyError(c, "Protocol error: unauthenticated multibulk length");
++            setProtocolError("unauth mbulk count", c);
++            return C_ERR;
+         }
+ 
+         c->qb_pos = (newline-c->querybuf)+2;
+@@ -1354,6 +1358,10 @@ int processMultibulkBuffer(client *c) {
+                 addReplyError(c,"Protocol error: invalid bulk length");
+                 setProtocolError("invalid bulk length",c);
+                 return C_ERR;
++            } else if (ll > 16384 && server.requirepass && !c->authenticated) {
++                addReplyError(c, "Protocol error: unauthenticated bulk length");
++                setProtocolError("unauth bulk length", c);
++                return C_ERR;
+             }
+ 
+             c->qb_pos = newline-c->querybuf+2;
+diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl
+index 633cda95c92..f5da728e845 100644
+--- a/tests/unit/auth.tcl
++++ b/tests/unit/auth.tcl
+@@ -24,4 +24,20 @@ start_server {tags {"auth"} overrides {requirepass foobar}} {
+         r set foo 100
+         r incr foo
+     } {101}
++
++    test {For unauthenticated clients multibulk and bulk length are limited} {
++        set rr [redis [srv "host"] [srv "port"] 0]
++        $rr write "*100\r\n"
++        $rr flush
++        catch {[$rr read]} e
++        assert_match {*unauthenticated multibulk length*} $e
++        $rr close
++
++        set rr [redis [srv "host"] [srv "port"] 0]
++        $rr write "*1\r\n\$100000000\r\n"
++        $rr flush
++        catch {[$rr read]} e
++        assert_match {*unauthenticated bulk length*} $e
++        $rr close
++    }
+ }
diff --git a/SOURCES/redis-CVE-2021-32687.patch b/SOURCES/redis-CVE-2021-32687.patch
new file mode 100644
index 0000000..8b99907
--- /dev/null
+++ b/SOURCES/redis-CVE-2021-32687.patch
@@ -0,0 +1,73 @@
+Backported for 5.0.3
+
+
+
+From c043ba77cf9bbf73e964fd9b8681c0cc4bd2662e Mon Sep 17 00:00:00 2001
+From: Oran Agra <oran@redislabs.com>
+Date: Sun, 26 Sep 2021 15:42:17 +0300
+Subject: [PATCH] Fix Integer overflow issue with intsets (CVE-2021-32687)
+
+The vulnerability involves changing the default set-max-intset-entries
+configuration parameter to a very large value and constructing specially
+crafted commands to manipulate sets
+
+(cherry picked from commit 4cb7075edaaf0584c74eb080d838ca8f56c190e3)
+---
+ src/intset.c | 4 +++-
+ src/rdb.c    | 4 +++-
+ src/t_set.c  | 5 ++++-
+ 3 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/src/intset.c b/src/intset.c
+index 4445a5ca6c56..288e19adff18 100644
+--- a/src/intset.c
++++ b/src/intset.c
+@@ -34,6 +34,7 @@
+ #include "intset.h"
+ #include "zmalloc.h"
+ #include "endianconv.h"
++#include "redisassert.h"
+ 
+ /* Note that these encodings are ordered, so:
+  * INTSET_ENC_INT16 < INTSET_ENC_INT32 < INTSET_ENC_INT64. */
+@@ -103,7 +104,8 @@ intset *intsetNew(void) {
+ 
+ /* Resize the intset */
+ static intset *intsetResize(intset *is, uint32_t len) {
+-    uint32_t size = len*intrev32ifbe(is->encoding);
++    uint64_t size = (uint64_t)len*intrev32ifbe(is->encoding);
++    assert(size <= SIZE_MAX - sizeof(intset));
+     is = zrealloc(is,sizeof(intset)+size);
+     return is;
+ }
+diff --git a/src/rdb.c b/src/rdb.c
+index afbbd8ca450c..3c58a1eaf7fb 100644
+--- a/src/rdb.c
++++ b/src/rdb.c
+@@ -1411,7 +1411,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) {
+         if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
+ 
+         /* Use a regular set when there are too many entries. */
+-        if (len > server.set_max_intset_entries) {
++        size_t max_entries = server.set_max_intset_entries;
++        if (max_entries >= 1<<30) max_entries = 1<<30;
++        if (len > max_entries) {
+             o = createSetObject();
+             /* It's faster to expand the dict to the right size asap in order
+              * to avoid rehashing */
+diff --git a/src/t_set.c b/src/t_set.c
+index f67073fe6bb1..db5a8cb757bb 100644
+--- a/src/t_set.c
++++ b/src/t_set.c
+@@ -66,7 +66,10 @@ int setTypeAdd(robj *subject, sds value) {
+             if (success) {
+                 /* Convert to regular set when the intset contains
+                  * too many entries. */
+-                if (intsetLen(subject->ptr) > server.set_max_intset_entries)
++                size_t max_entries = server.set_max_intset_entries;
++                /* limit to 1G entries due to intset internals. */
++                if (max_entries >= 1<<30) max_entries = 1<<30;
++                if (intsetLen(subject->ptr) > max_entries)
+                     setTypeConvert(subject,OBJ_ENCODING_HT);
+                 return 1;
+             }
diff --git a/SOURCES/redis-CVE-2021-41099.patch b/SOURCES/redis-CVE-2021-41099.patch
new file mode 100644
index 0000000..8881c69
--- /dev/null
+++ b/SOURCES/redis-CVE-2021-41099.patch
@@ -0,0 +1,94 @@
+Backported for 5.0.3
+
+
+
+From 48f04a82a0ac542341fb644a4cfbebadd5c59a33 Mon Sep 17 00:00:00 2001
+From: Yossi Gottlieb <yossigo@gmail.com>
+Date: Mon, 22 Feb 2021 15:41:32 +0200
+Subject: [PATCH] Fix integer overflow (CVE-2021-21309). (#8522)
+
+On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result with integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).
+
+This fix has two parts:
+
+Set a reasonable limit to the config parameter.
+Add additional checks to prevent the problem in other potential but unknown code paths.
+
+(cherry picked from commit d32f2e9999ce003bad0bd2c3bca29f64dcce4433)
+
+Fix MSVR reported issue.
+---
+ src/config.c  | 16 ++++++++--------
+ src/sds.c     |  3 +++
+ src/zmalloc.c | 10 ++++++++++
+ 3 files changed, 21 insertions(+), 8 deletions(-)
+
+diff --git a/src/sds.c b/src/sds.c
+index cd60946bdd32..12c9da356d9b 100644
+--- a/src/sds.c
++++ b/src/sds.c
+@@ -96,6 +96,7 @@ sds sdsnewlen(const void *init, size_t initlen) {
+     int hdrlen = sdsHdrSize(type);
+     unsigned char *fp; /* flags pointer. */
+ 
++    assert(hdrlen+initlen+1 > initlen); /* Catch size_t overflow */
+     sh = s_malloc(hdrlen+initlen+1);
+     if (init==SDS_NOINIT)
+         init = NULL;
+@@ -214,6 +215,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
+     len = sdslen(s);
+     sh = (char*)s-sdsHdrSize(oldtype);
+     newlen = (len+addlen);
++    assert(newlen > len);   /* Catch size_t overflow */
+     if (newlen < SDS_MAX_PREALLOC)
+         newlen *= 2;
+     else
+@@ -227,6 +229,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
+     if (type == SDS_TYPE_5) type = SDS_TYPE_8;
+ 
+     hdrlen = sdsHdrSize(type);
++    assert(hdrlen+newlen+1 > len);  /* Catch size_t overflow */
+     if (oldtype==type) {
+         newsh = s_realloc(sh, hdrlen+newlen+1);
+         if (newsh == NULL) return NULL;
+
+From 2b0ac7427ba5a6e1bc89380e960b138af893bbdd Mon Sep 17 00:00:00 2001
+From: YiyuanGUO <yguoaz@gmail.com>
+Date: Wed, 29 Sep 2021 10:20:35 +0300
+Subject: [PATCH] Fix integer overflow in _sdsMakeRoomFor (CVE-2021-41099)
+
+---
+ src/sds.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/sds.c b/src/sds.c
+index 12c9da356d9b..73d9807ae3c0 100644
+--- a/src/sds.c
++++ b/src/sds.c
+@@ -205,7 +205,7 @@ void sdsclear(sds s) {
+ sds sdsMakeRoomFor(sds s, size_t addlen) {
+     void *sh, *newsh;
+     size_t avail = sdsavail(s);
+-    size_t len, newlen;
++    size_t len, newlen, reqlen;
+     char type, oldtype = s[-1] & SDS_TYPE_MASK;
+     int hdrlen;
+ 
+@@ -214,7 +214,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
+ 
+     len = sdslen(s);
+     sh = (char*)s-sdsHdrSize(oldtype);
+-    newlen = (len+addlen);
++    reqlen = newlen = (len+addlen);
+     assert(newlen > len);   /* Catch size_t overflow */
+     if (newlen < SDS_MAX_PREALLOC)
+         newlen *= 2;
+@@ -229,7 +229,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
+     if (type == SDS_TYPE_5) type = SDS_TYPE_8;
+ 
+     hdrlen = sdsHdrSize(type);
+-    assert(hdrlen+newlen+1 > len);  /* Catch size_t overflow */
++    assert(hdrlen + newlen + 1 > reqlen);  /* Catch size_t overflow */
+     if (oldtype==type) {
+         newsh = s_realloc(sh, hdrlen+newlen+1);
+         if (newsh == NULL) return NULL;
diff --git a/SPECS/redis.spec b/SPECS/redis.spec
index a1e3f82..a0ac1ef 100644
--- a/SPECS/redis.spec
+++ b/SPECS/redis.spec
@@ -20,7 +20,7 @@
 
 Name:              redis
 Version:           5.0.3
-Release:           2%{?dist}
+Release:           5%{?dist}
 Summary:           A persistent key-value database
 # redis, jemalloc, linenoise, lzf, hiredis are BSD
 # lua is MIT
@@ -50,8 +50,13 @@ Patch0001:         0001-1st-man-pageis-for-redis-cli-redis-benchmark-redis-c.pat
 Patch0002:         0002-install-redis-check-rdb-as-a-symlink-instead-of-dupl.patch
 
 # Security patches
-Patch100:          php-CVE-2019-10192.patch
-Patch101:          php-CVE-2019-10193.patch
+Patch100:          redis-CVE-2019-10192.patch
+Patch101:          redis-CVE-2019-10193.patch
+Patch102:          redis-CVE-2021-41099.patch
+Patch103:          redis-CVE-2021-32687.patch
+Patch104:          redis-CVE-2021-32626.patch
+Patch105:          redis-CVE-2021-32627.patch
+Patch106:          redis-CVE-2021-32675.patch
 
 %if 0%{?with_tests}
 BuildRequires:     procps-ng
@@ -131,6 +136,11 @@ mv ../%{name}-doc-%{doc_commit} doc
 
 %patch100 -p1 -b .cve-2019-10192
 %patch101 -p1 -b .cve-2019-10193
+%patch102 -p1 -b .cve-2021-41099
+%patch103 -p1 -b .cve-2021-32687
+%patch104 -p1 -b .cve-2021-32626
+%patch105 -p1 -b .cve-2021-32627
+%patch106 -p1 -b .cve-2021-32675
 
 mv deps/lua/COPYRIGHT    COPYRIGHT-lua
 mv deps/jemalloc/COPYING COPYING-jemalloc
@@ -276,6 +286,22 @@ exit 0
 
 
 %changelog
+* Mon Oct 11 2021 Remi Collet <rcollet@redhat.com> - 5.0.3-5
+- fix denial of service via Redis Standard Protocol (RESP) request
+  CVE-2021-32675
+
+* Thu Oct  7 2021 Remi Collet <rcollet@redhat.com> - 5.0.3-4
+- fix lua scripts can overflow the heap-based Lua stack
+  CVE-2021-32626
+- fix integer overflow issue with Streams
+  CVE-2021-32627
+- fix integer overflow bug in the ziplist data structure
+  CVE-2021-32628
+- fix integer overflow issue with intsets
+  CVE-2021-32687
+- fix integer overflow issue with strings
+  CVE-2021-41099
+
 * Thu Jul 11 2019 Remi Collet <rcollet@redhat.com> - 5.0.3-2
 - fix Heap buffer overflow in HyperLogLog triggered by malicious client
   CVE-2019-10192