diff --git a/.curl.metadata b/.curl.metadata
new file mode 100644
index 0000000..a1dafc9
--- /dev/null
+++ b/.curl.metadata
@@ -0,0 +1 @@
+8b56123714b4e061f0f71005c5be598b12f82483 SOURCES/curl-7.61.1.tar.xz
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cfbf426
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+SOURCES/curl-7.61.1.tar.xz
diff --git a/SOURCES/0001-curl-7.61.1-test320-gnutls.patch b/SOURCES/0001-curl-7.61.1-test320-gnutls.patch
new file mode 100644
index 0000000..a9cbaac
--- /dev/null
+++ b/SOURCES/0001-curl-7.61.1-test320-gnutls.patch
@@ -0,0 +1,63 @@
+From 3cd5b375e31fb98e4782dc3a77e7316ad9eb26cf Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 4 Oct 2018 15:34:13 +0200
+Subject: [PATCH] test320: strip out more HTML when comparing
+
+To make the test case work with different gnutls-serv versions better.
+
+Reported-by: Kamil Dudka
+Fixes #3093
+Closes #3094
+
+Upstream-commit: 94ad57b0246b5658c2a9139dbe6a80efa4c4e2f3
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ tests/data/test320 | 24 ++++--------------------
+ 1 file changed, 4 insertions(+), 20 deletions(-)
+
+diff --git a/tests/data/test320 b/tests/data/test320
+index 457a11eb2..87311d4f2 100644
+--- a/tests/data/test320
++++ b/tests/data/test320
+@@ -62,34 +62,18 @@ simple TLS-SRP HTTPS GET, check user in response
+ HTTP/1.0 200 OK
+ Content-type: text/html
+ 
+-
+-<HTML><BODY>
+-<CENTER><H1>This is <a href="http://www.gnu.org/software/gnutls">GnuTLS</a></H1></CENTER>
+-
+-
+-
+-<h5>If your browser supports session resuming, then you should see the same session ID, when you press the <b>reload</b> button.</h5>
+-<p>Connected as user 'jsmith'.</p>
+-<P>
+-<TABLE border=1><TR><TD></TD></TR>
+-<TR><TD>Key Exchange:</TD><TD>SRP</TD></TR>
+-<TR><TD>Compression</TD><TD>NULL</TD></TR>
+-<TR><TD>Cipher</TD><TD>AES-NNN-CBC</TD></TR>
+-<TR><TD>MAC</TD><TD>SHA1</TD></TR>
+-<TR><TD>Ciphersuite</TD><TD>SRP_SHA_AES_NNN_CBC_SHA1</TD></TR></p></TABLE>
+-<hr><P>Your HTTP header was:<PRE>Host: %HOSTIP:%HTTPTLSPORT
++FINE
+ User-Agent: curl-test-suite
+ Accept: */*
+ 
+-</PRE></P>
+-</BODY></HTML>
+-
+ </file>
+ <stripfile>
+-s/^<p>Session ID:.*//
++s/^<p>Connected as user 'jsmith'.*/FINE/
+ s/Protocol version:.*[0-9]//
+ s/GNUTLS/GnuTLS/
+ s/(AES[-_])\d\d\d([-_]CBC)/$1NNN$2/
++s/^<.*\n//
++s/^\n//
+ </stripfile>
+ </verify>
+ 
+-- 
+2.17.1
+
diff --git a/SOURCES/0002-curl-7.61.1-tlsv1.0-man.patch b/SOURCES/0002-curl-7.61.1-tlsv1.0-man.patch
new file mode 100644
index 0000000..f384366
--- /dev/null
+++ b/SOURCES/0002-curl-7.61.1-tlsv1.0-man.patch
@@ -0,0 +1,28 @@
+From c574e05b0035f0d78e6bf6040d3f80430112ab4f Mon Sep 17 00:00:00 2001
+From: Kamil Dudka <kdudka@redhat.com>
+Date: Fri, 7 Sep 2018 16:50:45 +0200
+Subject: [PATCH] docs/cmdline-opts: update the documentation of --tlsv1.0
+
+... to reflect the changes in 6015cefb1b2cfde4b4850121c42405275e5e77d9
+
+Closes #2955
+
+Upstream-commit: 9ba22ce6b52751ed1e2abdd177b0a1d241819b4e
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/cmdline-opts/tlsv1.0.d | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/docs/cmdline-opts/tlsv1.0.d b/docs/cmdline-opts/tlsv1.0.d
+index 8789025e0..54e259682 100644
+--- a/docs/cmdline-opts/tlsv1.0.d
++++ b/docs/cmdline-opts/tlsv1.0.d
+@@ -3,4 +3,4 @@ Help: Use TLSv1.0
+ Protocols: TLS
+ Added: 7.34.0
+ ---
+-Forces curl to use TLS version 1.0 when connecting to a remote TLS server.
++Forces curl to use TLS version 1.0 or later when connecting to a remote TLS server.
+-- 
+2.17.1
+
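The documentation fix above matches the behavior change referenced in the commit message: --tlsv1.0 now selects a minimum TLS version instead of pinning exactly 1.0. As a hedged illustration, the sketch below shows the libcurl counterpart of that command line option, using CURLOPT_SSLVERSION with CURL_SSLVERSION_TLSv1_0; the "or later" semantics are taken from the wording added above.

    #include <curl/curl.h>

    /* Minimal sketch: the libcurl counterpart of "curl --tlsv1.0 https://example.com/".
     * CURL_SSLVERSION_TLSv1_0 requests TLS 1.0 or later, matching the updated
     * wording of the --tlsv1.0 documentation above. Error handling omitted. */
    int main(void)
    {
      CURL *curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
        curl_easy_setopt(curl, CURLOPT_SSLVERSION, (long)CURL_SSLVERSION_TLSv1_0);
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      return 0;
    }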
diff --git a/SOURCES/0003-curl-7.61.1-TLS-1.3-PHA.patch b/SOURCES/0003-curl-7.61.1-TLS-1.3-PHA.patch
new file mode 100644
index 0000000..99273ac
--- /dev/null
+++ b/SOURCES/0003-curl-7.61.1-TLS-1.3-PHA.patch
@@ -0,0 +1,46 @@
+From bb8ad3da3fb4ab3f6556daa1f67b259c12a3c7de Mon Sep 17 00:00:00 2001
+From: Christian Heimes <christian@python.org>
+Date: Fri, 21 Sep 2018 10:37:43 +0200
+Subject: [PATCH] OpenSSL: enable TLS 1.3 post-handshake auth
+
+OpenSSL 1.1.1 requires clients to opt-in for post-handshake
+authentication.
+
+Fixes: https://github.com/curl/curl/issues/3026
+Signed-off-by: Christian Heimes <christian@python.org>
+
+Closes https://github.com/curl/curl/pull/3027
+
+Upstream-commit: b939bc47b27cd57c6ebb852ad653933e4124b452
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/vtls/openssl.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index a487f55..78970d1 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -178,6 +178,7 @@ static unsigned long OpenSSL_version_num(void)
+      !defined(LIBRESSL_VERSION_NUMBER) &&       \
+      !defined(OPENSSL_IS_BORINGSSL))
+ #define HAVE_SSL_CTX_SET_CIPHERSUITES
++#define HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH
+ #endif
+ 
+ #if defined(LIBRESSL_VERSION_NUMBER)
+@@ -2467,6 +2468,11 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+   }
+ #endif
+ 
++#ifdef HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH
++  /* OpenSSL 1.1.1 requires clients to opt-in for PHA */
++  SSL_CTX_set_post_handshake_auth(BACKEND->ctx, 1);
++#endif
++
+ #ifdef USE_TLS_SRP
+   if(ssl_authtype == CURL_TLSAUTH_SRP) {
+     char * const ssl_username = SSL_SET_OPTION(username);
+-- 
+2.17.1
+
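For context, OpenSSL 1.1.1 keeps TLS 1.3 post-handshake authentication disabled on the client side unless the application opts in with SSL_CTX_set_post_handshake_auth() (or SSL_set_post_handshake_auth() per connection), which is what the hunk above adds for libcurl's context. A minimal sketch of the same opt-in on a bare OpenSSL client context, assuming OpenSSL 1.1.1 or newer and omitting error handling:

    #include <openssl/ssl.h>

    /* Sketch only: create a client SSL_CTX and opt in to TLS 1.3
     * post-handshake authentication, mirroring what the patch does for
     * BACKEND->ctx inside ossl_connect_step1(). Requires OpenSSL 1.1.1+. */
    static SSL_CTX *make_client_ctx(void)
    {
      SSL_CTX *ctx = SSL_CTX_new(TLS_client_method());
      if(ctx)
        SSL_CTX_set_post_handshake_auth(ctx, 1); /* allow post-handshake cert requests */
      return ctx;
    }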
diff --git a/SOURCES/0004-curl-7.61.1-CVE-2018-16842.patch b/SOURCES/0004-curl-7.61.1-CVE-2018-16842.patch
new file mode 100644
index 0000000..1b8a198
--- /dev/null
+++ b/SOURCES/0004-curl-7.61.1-CVE-2018-16842.patch
@@ -0,0 +1,81 @@
+From 27d6c92acdac671ddf8f77f72956b2181561f774 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sun, 28 Oct 2018 01:33:23 +0200
+Subject: [PATCH 1/2] voutf: fix bad arithmetic when outputting warnings to
+ stderr
+
+CVE-2018-16842
+Reported-by: Brian Carpenter
+Bug: https://curl.haxx.se/docs/CVE-2018-16842.html
+
+Upstream-commit: d530e92f59ae9bb2d47066c3c460b25d2ffeb211
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ src/tool_msgs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/tool_msgs.c b/src/tool_msgs.c
+index 9cce806..05bec39 100644
+--- a/src/tool_msgs.c
++++ b/src/tool_msgs.c
+@@ -67,7 +67,7 @@ static void voutf(struct GlobalConfig *config,
+         (void)fwrite(ptr, cut + 1, 1, config->errors);
+         fputs("\n", config->errors);
+         ptr += cut + 1; /* skip the space too */
+-        len -= cut;
++        len -= cut + 1;
+       }
+       else {
+         fputs(ptr, config->errors);
+-- 
+2.17.2
+
+
+From 23f8c641b02e6c302d0e8cc5a5ee225a33b01f28 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sun, 28 Oct 2018 10:43:57 +0100
+Subject: [PATCH 2/2] test2080: verify the fix for CVE-2018-16842
+
+Upstream-commit: 350306e4726b71b5b386fc30e3fecc039a807157
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ tests/data/Makefile.inc |   4 ++--
+ tests/data/test2080     | Bin 0 -> 20659 bytes
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+ create mode 100644 tests/data/test2080
+
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index e045748..aa5fff0 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -198,7 +198,7 @@ test2048 test2049 test2050 test2051 test2052 test2053 test2054 test2055 \
+ test2056 test2057 test2058 test2059 test2060 test2061 test2062 test2063 \
+ test2064 test2065 test2066 test2067 test2068 test2069 \
+ \
+-test2070 test2071 test2072 test2073 \
+-test2074 test2075 \
++test2070 test2071 test2072 test2073 test2074 test2075 \
++test2080 \
+ \
+ test3000 test3001
+diff --git a/tests/data/test2080 b/tests/data/test2080
+new file mode 100644
+index 0000000000000000000000000000000000000000..47e376ecb5d7879c0a98e392bff48ccc52e9db0a
+GIT binary patch
+literal 20659
+zcmeI)Pj3@35QkyT{uI*`iBshYE(n>u@JB+F3kdG+t~asjwJY0gl}``eO+)FONU8ef
+zl6Ca+%<OZ|nCeRHZE>A4K8~q<UAgUD%0ubY=PwtZRG;GL*UIRJ-;Lfy)u}p_A1>dz
+zd{+G6l*#ToY+DU||F9%J1n*+KPxQ;7MapuoQ!&MMQSXmpqMh0_yS6g=;N;HNjilBk
+zY$c?)mULZxib{;$g~jw~nrs|8b@sJI)_QmS_4(WLrNld}2Y0LEO$e>m->_NA&o$n!
+z9^YDZ>cvMs2q1s}0tg_000PG)@a?$9VHyMwKmY**5I_I{1Q0m1z~!MEP#*yV5I_I{
+z1Q0*~0R#|0009ILKmY**4ldvh-hl=PAb<b@2q1s}0tg`Rgaqum{m<+P&C93=Ab<b@
+z2q1s}0tg_0z|jf3Ji3V(2mu5TK;StGoIK~3=iL!N0D=D{@VjlsoA=?(>-+Xw`j-8D
+zzg+g?Rt8(G*s;1Sb>n1S94H%G<kGn)tFlRTrA%AW*RoyP3pi(fe!mc3WU^sQd2)l4
+jB)+~1L0rx$OS-AbERTH}TH`mZ^*=|W_vMU!*i-li)g+9V
+
+literal 0
+HcmV?d00001
+
+-- 
+2.17.2
+
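The first hunk is the whole fix: voutf() writes cut + 1 bytes (the wrapped text plus the space it was cut at) and advances the read pointer by cut + 1, so the remaining length has to shrink by cut + 1 as well; shrinking it by only cut lets len drift one byte high per iteration and eventually read past the end of the buffer. The sketch below shows the corrected invariant in a standalone, hypothetical wrap_to_stderr() helper (not curl's voutf()):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper, not curl code: print 'msg' wrapped at 'width'
     * columns while keeping the pointer advance and the length decrement in
     * sync, which is the invariant the CVE-2018-16842 fix restores. */
    static void wrap_to_stderr(const char *msg, size_t width)
    {
      size_t len = strlen(msg);
      const char *ptr = msg;

      if(width == 0)
        return;

      while(len > width) {
        size_t cut = width - 1;
        while(cut && ptr[cut] != ' ')  /* wrap at the last space that fits */
          cut--;
        if(!cut)
          cut = width - 1;             /* no space found: hard cut */
        (void)fwrite(ptr, cut + 1, 1, stderr);
        fputc('\n', stderr);
        ptr += cut + 1;                /* skip the space too */
        len -= cut + 1;                /* must match the pointer advance */
      }
      fprintf(stderr, "%s\n", ptr);
    }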
diff --git a/SOURCES/0005-curl-7.61.1-CVE-2018-16840.patch b/SOURCES/0005-curl-7.61.1-CVE-2018-16840.patch
new file mode 100644
index 0000000..de546c0
--- /dev/null
+++ b/SOURCES/0005-curl-7.61.1-CVE-2018-16840.patch
@@ -0,0 +1,39 @@
+From 235f209a0e62edee654be441a50bb0c154edeaa5 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 18 Oct 2018 15:07:15 +0200
+Subject: [PATCH] Curl_close: clear data->multi_easy on free to avoid
+ use-after-free
+
+Regression from b46cfbc068 (7.59.0)
+CVE-2018-16840
+Reported-by: Brian Carpenter (Geeknik Labs)
+
+Bug: https://curl.haxx.se/docs/CVE-2018-16840.html
+
+Upstream-commit: 81d135d67155c5295b1033679c606165d4e28f3f
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/url.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index f159008..dcc1ecc 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -319,10 +319,12 @@ CURLcode Curl_close(struct Curl_easy *data)
+        and detach this handle from there. */
+     curl_multi_remove_handle(data->multi, data);
+ 
+-  if(data->multi_easy)
++  if(data->multi_easy) {
+     /* when curl_easy_perform() is used, it creates its own multi handle to
+        use and this is the one */
+     curl_multi_cleanup(data->multi_easy);
++    data->multi_easy = NULL;
++  }
+ 
+   /* Destroy the timeout list that is held in the easy handle. It is
+      /normally/ done by curl_multi_remove_handle() but this is "just in
+-- 
+2.17.2
+
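The use-after-free arises because data->multi_easy kept pointing at a multi handle that curl_multi_cleanup() had already freed. The fix applies the usual "free, then clear the owning pointer" pattern; a minimal sketch of that pattern with an assumed struct owner type (not a curl structure):

    #include <stdlib.h>

    /* Assumed illustration type, not part of curl. */
    struct owner {
      void *resource;  /* owned allocation, may be NULL */
    };

    /* Free the owned resource and clear the pointer so any later access sees
     * NULL instead of a dangling pointer; this turns a potential
     * use-after-free into an easily detectable NULL check, which is the
     * pattern the hunk above applies to data->multi_easy. */
    static void owner_drop_resource(struct owner *o)
    {
      if(o->resource) {
        free(o->resource);
        o->resource = NULL;
      }
    }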
diff --git a/SOURCES/0006-curl-7.61.1-CVE-2018-16839.patch b/SOURCES/0006-curl-7.61.1-CVE-2018-16839.patch
new file mode 100644
index 0000000..949254f
--- /dev/null
+++ b/SOURCES/0006-curl-7.61.1-CVE-2018-16839.patch
@@ -0,0 +1,31 @@
+From ad9943254ded9a983af7d581e8a1f3317e8a8781 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 28 Sep 2018 16:08:16 +0200
+Subject: [PATCH] Curl_auth_create_plain_message: fix too-large-input-check
+
+CVE-2018-16839
+Reported-by: Harry Sintonen
+Bug: https://curl.haxx.se/docs/CVE-2018-16839.html
+
+Upstream-commit: f3a24d7916b9173c69a3e0ee790102993833d6c5
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/vauth/cleartext.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/vauth/cleartext.c b/lib/vauth/cleartext.c
+index 5d61ce6..1367143 100644
+--- a/lib/vauth/cleartext.c
++++ b/lib/vauth/cleartext.c
+@@ -74,7 +74,7 @@ CURLcode Curl_auth_create_plain_message(struct Curl_easy *data,
+   plen = strlen(passwdp);
+ 
+   /* Compute binary message length. Check for overflows. */
+-  if((ulen > SIZE_T_MAX/2) || (plen > (SIZE_T_MAX/2 - 2)))
++  if((ulen > SIZE_T_MAX/4) || (plen > (SIZE_T_MAX/2 - 2)))
+     return CURLE_OUT_OF_MEMORY;
+   plainlen = 2 * ulen + plen + 2;
+ 
+-- 
+2.17.2
+
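The hunk above changes only the bound on ulen. Since the visible computation is plainlen = 2 * ulen + plen + 2, requiring ulen <= SIZE_T_MAX/4 and plen <= SIZE_T_MAX/2 - 2 guarantees 2*ulen <= SIZE_T_MAX/2 and plen + 2 <= SIZE_T_MAX/2, so the sum can never wrap around and produce an undersized allocation. A standalone sketch of the same guard, where plain_msg_len() is a hypothetical helper and SIZE_MAX stands in for curl's SIZE_T_MAX:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical helper, not curl code: compute 2*ulen + plen + 2 only when
     * the operands are small enough that the sum provably fits in size_t.
     * Returns 0 on success, -1 when the inputs are rejected as too large. */
    static int plain_msg_len(size_t ulen, size_t plen, size_t *outlen)
    {
      /* 2*ulen <= SIZE_MAX/2 and plen + 2 <= SIZE_MAX/2, so the total fits */
      if(ulen > SIZE_MAX / 4 || plen > SIZE_MAX / 2 - 2)
        return -1;
      *outlen = 2 * ulen + plen + 2;
      return 0;
    }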
diff --git a/SOURCES/0007-curl-7.63.0-JO-preserve-local-file.patch b/SOURCES/0007-curl-7.63.0-JO-preserve-local-file.patch
new file mode 100644
index 0000000..6799dfa
--- /dev/null
+++ b/SOURCES/0007-curl-7.63.0-JO-preserve-local-file.patch
@@ -0,0 +1,116 @@
+From ff74657fb645e7175971128a171ef7d5ece40d77 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 17 Dec 2018 12:51:51 +0100
+Subject: [PATCH] curl -J: do not append to the destination file
+
+Reported-by: Kamil Dudka
+Fixes #3380
+Closes #3381
+
+Upstream-commit: 4849267197682e69cfa056c2bd7a44acd123a917
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ src/tool_cb_hdr.c  | 6 +++---
+ src/tool_cb_wrt.c  | 9 ++++-----
+ src/tool_cb_wrt.h  | 2 +-
+ src/tool_operate.c | 2 +-
+ 4 files changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/src/tool_cb_hdr.c b/src/tool_cb_hdr.c
+index 84b0d9c..3844904 100644
+--- a/src/tool_cb_hdr.c
++++ b/src/tool_cb_hdr.c
+@@ -148,12 +148,12 @@ size_t tool_header_cb(char *ptr, size_t size, size_t nmemb, void *userdata)
+         outs->filename = filename;
+         outs->alloc_filename = TRUE;
+         hdrcbdata->honor_cd_filename = FALSE; /* done now! */
+-        if(!tool_create_output_file(outs, TRUE))
++        if(!tool_create_output_file(outs))
+           return failure;
+       }
+       break;
+     }
+-    if(!outs->stream && !tool_create_output_file(outs, FALSE))
++    if(!outs->stream && !tool_create_output_file(outs))
+       return failure;
+   }
+ 
+@@ -162,7 +162,7 @@ size_t tool_header_cb(char *ptr, size_t size, size_t nmemb, void *userdata)
+     /* bold headers only happen for HTTP(S) and RTSP */
+     char *value = NULL;
+ 
+-    if(!outs->stream && !tool_create_output_file(outs, FALSE))
++    if(!outs->stream && !tool_create_output_file(outs))
+       return failure;
+ 
+     if(hdrcbdata->global->isatty && hdrcbdata->global->styled_output)
+diff --git a/src/tool_cb_wrt.c b/src/tool_cb_wrt.c
+index 2cb5e1b..195d6e7 100644
+--- a/src/tool_cb_wrt.c
++++ b/src/tool_cb_wrt.c
+@@ -32,8 +32,7 @@
+ #include "memdebug.h" /* keep this as LAST include */
+ 
+ /* create a local file for writing, return TRUE on success */
+-bool tool_create_output_file(struct OutStruct *outs,
+-                             bool append)
++bool tool_create_output_file(struct OutStruct *outs)
+ {
+   struct GlobalConfig *global = outs->config->global;
+   FILE *file;
+@@ -43,7 +42,7 @@ bool tool_create_output_file(struct OutStruct *outs,
+     return FALSE;
+   }
+ 
+-  if(outs->is_cd_filename && !append) {
++  if(outs->is_cd_filename) {
+     /* don't overwrite existing files */
+     file = fopen(outs->filename, "rb");
+     if(file) {
+@@ -55,7 +54,7 @@ bool tool_create_output_file(struct OutStruct *outs,
+   }
+ 
+   /* open file for writing */
+-  file = fopen(outs->filename, append?"ab":"wb");
++  file = fopen(outs->filename, "wb");
+   if(!file) {
+     warnf(global, "Failed to create the file %s: %s\n", outs->filename,
+           strerror(errno));
+@@ -142,7 +141,7 @@ size_t tool_write_cb(char *buffer, size_t sz, size_t nmemb, void *userdata)
+   }
+ #endif
+ 
+-  if(!outs->stream && !tool_create_output_file(outs, FALSE))
++  if(!outs->stream && !tool_create_output_file(outs))
+     return failure;
+ 
+   if(is_tty && (outs->bytes < 2000) && !config->terminal_binary_ok) {
+diff --git a/src/tool_cb_wrt.h b/src/tool_cb_wrt.h
+index 51e002b..188d3ea 100644
+--- a/src/tool_cb_wrt.h
++++ b/src/tool_cb_wrt.h
+@@ -30,7 +30,7 @@
+ size_t tool_write_cb(char *buffer, size_t sz, size_t nmemb, void *userdata);
+ 
+ /* create a local file for writing, return TRUE on success */
+-bool tool_create_output_file(struct OutStruct *outs, bool append);
++bool tool_create_output_file(struct OutStruct *outs);
+ 
+ #endif /* HEADER_CURL_TOOL_CB_WRT_H */
+ 
+diff --git a/src/tool_operate.c b/src/tool_operate.c
+index e53a9d8..429e9cf 100644
+--- a/src/tool_operate.c
++++ b/src/tool_operate.c
+@@ -1581,7 +1581,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
+             /* do not create (or even overwrite) the file in case we get no
+                data because of unmet condition */
+             curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &cond_unmet);
+-            if(!cond_unmet && !tool_create_output_file(&outs, FALSE))
++            if(!cond_unmet && !tool_create_output_file(&outs))
+               result = CURLE_WRITE_ERROR;
+           }
+ 
+-- 
+2.17.2
+
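After this change tool_create_output_file() has a single behavior: if the file name came from a Content-Disposition header it refuses to clobber an existing file, and in every case it opens the output with "wb" so nothing is ever appended to a pre-existing local file. A stripped-down sketch of that decision, using a hypothetical struct outfile instead of curl's OutStruct:

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for curl's OutStruct, for illustration only. */
    struct outfile {
      const char *filename;
      bool is_cd_filename;  /* name came from a Content-Disposition header */
      FILE *stream;
    };

    /* Sketch of the post-fix behavior: never append, and never overwrite a
     * file whose name the remote server chose. Returns true on success. */
    static bool create_output_file(struct outfile *out)
    {
      if(out->is_cd_filename) {
        FILE *probe = fopen(out->filename, "rb");
        if(probe) {
          fclose(probe);
          return false;  /* refuse to overwrite an existing file */
        }
      }
      out->stream = fopen(out->filename, "wb");  /* always truncate, never "ab" */
      return out->stream != NULL;
    }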
diff --git a/SOURCES/0008-curl-7.61.1-CVE-2018-20483.patch b/SOURCES/0008-curl-7.61.1-CVE-2018-20483.patch
new file mode 100644
index 0000000..8b20ff6
--- /dev/null
+++ b/SOURCES/0008-curl-7.61.1-CVE-2018-20483.patch
@@ -0,0 +1,4776 @@
+From 907da069c450ca20442839d9e95e3661a5c06b61 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sun, 5 Aug 2018 11:51:07 +0200
+Subject: [PATCH 01/14] URL-API
+
+See header file and man pages for API. All documented API details work
+and are tested in the 1560 test case.
+
+Closes #2842
+
+Upstream-commit: fb30ac5a2d63773c529c19259754e2b306ac2e2e
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/Makefile.inc        |    1 +
+ docs/libcurl/curl_url.3          |   61 ++
+ docs/libcurl/curl_url_cleanup.3  |   44 +
+ docs/libcurl/curl_url_dup.3      |   52 ++
+ docs/libcurl/curl_url_get.3      |  110 +++
+ docs/libcurl/curl_url_set.3      |  120 +++
+ docs/libcurl/symbols-in-versions |   30 +
+ include/curl/Makefile.am         |    4 +-
+ include/curl/curl.h              |    1 +
+ include/curl/urlapi.h            |  121 +++
+ lib/Makefile.inc                 |    5 +-
+ lib/escape.c                     |   20 +-
+ lib/escape.h                     |    3 +-
+ lib/imap.c                       |    3 +-
+ lib/transfer.c                   |  314 +------
+ lib/url.c                        |   44 +-
+ lib/url.h                        |    2 +
+ lib/{escape.h => urlapi-int.h}   |   22 +-
+ lib/urlapi.c                     | 1315 ++++++++++++++++++++++++++++++
+ tests/data/Makefile.inc          |    2 +
+ tests/data/test1560              |   28 +
+ tests/libtest/Makefile.am        |    5 +
+ tests/libtest/Makefile.inc       |    4 +
+ tests/libtest/lib1560.c          |  760 +++++++++++++++++
+ 24 files changed, 2716 insertions(+), 355 deletions(-)
+ create mode 100644 docs/libcurl/curl_url.3
+ create mode 100644 docs/libcurl/curl_url_cleanup.3
+ create mode 100644 docs/libcurl/curl_url_dup.3
+ create mode 100644 docs/libcurl/curl_url_get.3
+ create mode 100644 docs/libcurl/curl_url_set.3
+ create mode 100644 include/curl/urlapi.h
+ copy lib/{escape.h => urlapi-int.h} (66%)
+ create mode 100644 lib/urlapi.c
+ create mode 100644 tests/data/test1560
+ create mode 100644 tests/libtest/lib1560.c
+
+diff --git a/docs/libcurl/Makefile.inc b/docs/libcurl/Makefile.inc
+index eea48c4..955492c 100644
+--- a/docs/libcurl/Makefile.inc
++++ b/docs/libcurl/Makefile.inc
+@@ -22,4 +22,5 @@ man_MANS = curl_easy_cleanup.3 curl_easy_getinfo.3 curl_easy_init.3      \
+   curl_mime_data.3 curl_mime_data_cb.3 curl_mime_filedata.3              \
+   curl_mime_filename.3 curl_mime_subparts.3                              \
+   curl_mime_type.3 curl_mime_headers.3 curl_mime_encoder.3 libcurl-env.3 \
++  curl_url.3 curl_url_cleanup.3 curl_url_dup.3 curl_url_get.3 curl_url_set.3 \
+   libcurl-security.3
+diff --git a/docs/libcurl/curl_url.3 b/docs/libcurl/curl_url.3
+new file mode 100644
+index 0000000..0a56264
+--- /dev/null
++++ b/docs/libcurl/curl_url.3
+@@ -0,0 +1,61 @@
++.\" **************************************************************************
++.\" *                                  _   _ ____  _
++.\" *  Project                     ___| | | |  _ \| |
++.\" *                             / __| | | | |_) | |
++.\" *                            | (__| |_| |  _ <| |___
++.\" *                             \___|\___/|_| \_\_____|
++.\" *
++.\" * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" *
++.\" * This software is licensed as described in the file COPYING, which
++.\" * you should have received as part of this distribution. The terms
++.\" * are also available at https://curl.haxx.se/docs/copyright.html.
++.\" *
++.\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++.\" * copies of the Software, and permit persons to whom the Software is
++.\" * furnished to do so, under the terms of the COPYING file.
++.\" *
++.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++.\" * KIND, either express or implied.
++.\" *
++.\" **************************************************************************
++.TH curl_url 3 "6 Aug 2018" "libcurl" "libcurl Manual"
++.SH NAME
++curl_url - returns a new CURLU handle
++.SH SYNOPSIS
++.B #include <curl/curl.h>
++
++CURLU *curl_url();
++.SH EXPERIMENTAL
++The URL API is considered \fBEXPERIMENTAL\fP until further notice. Please test
++it, report bugs and help us perfect it. Once proven to be reliable, the
++experimental label will be removed.
++
++While this API is marked experimental, we reserve the right to modify the API
++slightly if we deem it necessary and it makes it notably better or easier to
++use.
++.SH DESCRIPTION
++This function allocates and returns a pointer to a fresh CURLU handle, to
++be used for further use of the URL API.
++.SH RETURN VALUE
++Returns a \fBCURLU *\fP if successful, or NULL if out of memory.
++.SH EXAMPLE
++.nf
++  CURLUcode rc;
++  CURLU *url = curl_url();
++  rc = curl_url_set(url, CURLUPART_URL, "https://example.com", 0);
++  if(!rc) {
++    char *scheme;
++    rc = curl_url_get(url, CURLUPART_SCHEME, &scheme, 0);
++    if(!rc) {
++      printf("the scheme is %s\n", scheme);
++      curl_free(scheme);
++    }
++    curl_url_cleanup(url);
++  }
++.fi
++.SH AVAILABILITY
++Added in curl 7.63.0
++.SH "SEE ALSO"
++.BR curl_url_cleanup "(3), " curl_url_get "(3), " curl_url_set "(3), "
++.BR curl_url_dup "(3), "
+diff --git a/docs/libcurl/curl_url_cleanup.3 b/docs/libcurl/curl_url_cleanup.3
+new file mode 100644
+index 0000000..a8158b7
+--- /dev/null
++++ b/docs/libcurl/curl_url_cleanup.3
+@@ -0,0 +1,44 @@
++.\" **************************************************************************
++.\" *                                  _   _ ____  _
++.\" *  Project                     ___| | | |  _ \| |
++.\" *                             / __| | | | |_) | |
++.\" *                            | (__| |_| |  _ <| |___
++.\" *                             \___|\___/|_| \_\_____|
++.\" *
++.\" * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" *
++.\" * This software is licensed as described in the file COPYING, which
++.\" * you should have received as part of this distribution. The terms
++.\" * are also available at https://curl.haxx.se/docs/copyright.html.
++.\" *
++.\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++.\" * copies of the Software, and permit persons to whom the Software is
++.\" * furnished to do so, under the terms of the COPYING file.
++.\" *
++.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++.\" * KIND, either express or implied.
++.\" *
++.\" **************************************************************************
++.TH curl_url_cleanup 3 "6 Aug 2018" "libcurl" "libcurl Manual"
++.SH NAME
++curl_url_cleanup - free a CURLU handle
++.SH SYNOPSIS
++.B #include <curl/curl.h>
++
++void curl_url_cleanup(CURLU *handle);
++.fi
++.SH DESCRIPTION
++Frees all the resources associated with the given CURLU handle!
++.SH RETURN VALUE
++none
++.SH EXAMPLE
++.nf
++  CURLU *url = curl_url();
++  curl_url_set(url, CURLUPART_URL, "https://example.com", 0);
++  curl_url_cleanup(url);
++.fi
++.SH AVAILABILITY
++Added in curl 7.63.0
++.SH "SEE ALSO"
++.BR curl_url_dup "(3), " curl_url "(3), " curl_url_set "(3), "
++.BR curl_url_get "(3), "
+diff --git a/docs/libcurl/curl_url_dup.3 b/docs/libcurl/curl_url_dup.3
+new file mode 100644
+index 0000000..4815dbd
+--- /dev/null
++++ b/docs/libcurl/curl_url_dup.3
+@@ -0,0 +1,52 @@
++.\" **************************************************************************
++.\" *                                  _   _ ____  _
++.\" *  Project                     ___| | | |  _ \| |
++.\" *                             / __| | | | |_) | |
++.\" *                            | (__| |_| |  _ <| |___
++.\" *                             \___|\___/|_| \_\_____|
++.\" *
++.\" * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" *
++.\" * This software is licensed as described in the file COPYING, which
++.\" * you should have received as part of this distribution. The terms
++.\" * are also available at https://curl.haxx.se/docs/copyright.html.
++.\" *
++.\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++.\" * copies of the Software, and permit persons to whom the Software is
++.\" * furnished to do so, under the terms of the COPYING file.
++.\" *
++.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++.\" * KIND, either express or implied.
++.\" *
++.\" **************************************************************************
++.TH curl_url_dup 3 "6 Aug 2018" "libcurl" "libcurl Manual"
++.SH NAME
++curl_url_dup - duplicate a CURLU handle
++.SH SYNOPSIS
++.B #include <curl/curl.h>
++
++CURLU *curl_url_dup(CURLU *inhandle);
++.fi
++.SH DESCRIPTION
++Duplicates a given CURLU \fIinhandle\fP and all its contents and returns a
++pointer to a new CURLU handle. The new handle also needs to be freed with
++\fIcurl_url_cleanup(3)\fP.
++.SH RETURN VALUE
++Returns a new handle or NULL if out of memory.
++.SH EXAMPLE
++.nf
++  CURLUcode rc;
++  CURLU *url = curl_url();
++  CURLU *url2;
++  rc = curl_url_set(url, CURLUPART_URL, "https://example.com", 0);
++  if(!rc) {
++    url2 = curl_url_dup(url); /* clone it! */
++    curl_url_cleanup(url2);
++  }
++  curl_url_cleanup(url);
++.fi
++.SH AVAILABILITY
++Added in curl 7.63.0
++.SH "SEE ALSO"
++.BR curl_url_cleanup "(3), " curl_url "(3), " curl_url_set "(3), "
++.BR curl_url_get "(3), "
+diff --git a/docs/libcurl/curl_url_get.3 b/docs/libcurl/curl_url_get.3
+new file mode 100644
+index 0000000..824d496
+--- /dev/null
++++ b/docs/libcurl/curl_url_get.3
+@@ -0,0 +1,110 @@
++.\" **************************************************************************
++.\" *                                  _   _ ____  _
++.\" *  Project                     ___| | | |  _ \| |
++.\" *                             / __| | | | |_) | |
++.\" *                            | (__| |_| |  _ <| |___
++.\" *                             \___|\___/|_| \_\_____|
++.\" *
++.\" * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" *
++.\" * This software is licensed as described in the file COPYING, which
++.\" * you should have received as part of this distribution. The terms
++.\" * are also available at https://curl.haxx.se/docs/copyright.html.
++.\" *
++.\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++.\" * copies of the Software, and permit persons to whom the Software is
++.\" * furnished to do so, under the terms of the COPYING file.
++.\" *
++.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++.\" * KIND, either express or implied.
++.\" *
++.\" **************************************************************************
++.TH curl_url_get 3 "6 Aug 2018" "libcurl" "libcurl Manual"
++.SH NAME
++curl_url_get - extract a part from a URL
++.SH SYNOPSIS
++.B #include <curl/curl.h>
++
++.nf
++CURLUcode curl_url_get(CURLU *url,
++                       CURLUPart what,
++                       char **part,
++                       unsigned int flags)
++.fi
++.SH DESCRIPTION
++Given the \fIurl\fP handle of an already parsed URL, this function lets the
++user extract individual pieces from it.
++
++The \fIwhat\fP argument should be the particular part to extract (see list
++below) and \fIpart\fP points to a 'char *' to get updated to point to a newly
++allocated string with the contents.
++
++The \fIflags\fP argument is a bitmask with individual features.
++
++The returned part pointer must be freed with \fIcurl_free(3)\fP after use.
++.SH FLAGS
++The flags argument is zero, one or more bits set in a bitmask.
++.IP CURLU_DEFAULT_PORT
++If the handle has no port stored, this option will make \fIcurl_url_get(3)\fP
++return the default port for the used scheme.
++.IP CURLU_DEFAULT_SCHEME
++If the handle has no scheme stored, this option will make
++\fIcurl_url_get(3)\fP return the default scheme instead of error.
++.IP CURLU_NO_DEFAULT_PORT
++Instructs \fIcurl_url_get(3)\fP to not return a port number if it matches the
++default port for the scheme.
++.IP CURLU_URLDECODE
++Asks \fIcurl_url_get(3)\fP to URL decode the contents before returning it. It
++will not attempt to decode the scheme, the port number or the full URL.
++
++The query component will also get plus-to-space conversion as a bonus when
++this bit is set.
++
++Note that this URL decoding is charset unaware and you will get a zero
++terminated string back with data that could be intended for a particular
++encoding.
++
++If there are any byte values lower than 32 in the decoded string, the get
++operation will return an error instead.
++.SH PARTS
++.IP CURLUPART_URL
++When asked to return the full URL, \fIcurl_url_get(3)\fP will return a
++normalized and possibly cleaned up version of what was previously parsed.
++.IP CURLUPART_SCHEME
++Scheme cannot be URL decoded on get.
++.IP CURLUPART_USER
++.IP CURLUPART_PASSWORD
++.IP CURLUPART_OPTIONS
++.IP CURLUPART_HOST
++.IP CURLUPART_PORT
++Port cannot be URL decoded on get.
++.IP CURLUPART_PATH
++.IP CURLUPART_QUERY
++The query part will also get pluses converted to space when asked to URL
++decode on get with the CURLU_URLDECODE bit.
++.IP CURLUPART_FRAGMENT
++.SH RETURN VALUE
++Returns a CURLUcode error value, which is CURLUE_OK (0) if everything went
++fine.
++
++If this function returns an error, no URL part is returned.
++.SH EXAMPLE
++.nf
++  CURLUcode rc;
++  CURLU *url = curl_url();
++  rc = curl_url_set(url, CURLUPART_URL, "https://example.com", 0);
++  if(!rc) {
++    char *scheme;
++    rc = curl_url_get(url, CURLUPART_SCHEME, &scheme, 0);
++    if(!rc) {
++      printf("the scheme is %s\n", scheme);
++      curl_free(scheme);
++    }
++    curl_url_cleanup(url);
++  }
++.fi
++.SH AVAILABILITY
++Added in curl 7.63.0
++.SH "SEE ALSO"
++.BR curl_url_cleanup "(3), " curl_url "(3), " curl_url_set "(3), "
++.BR curl_url_dup "(3), "
+diff --git a/docs/libcurl/curl_url_set.3 b/docs/libcurl/curl_url_set.3
+new file mode 100644
+index 0000000..75fc0d9
+--- /dev/null
++++ b/docs/libcurl/curl_url_set.3
+@@ -0,0 +1,120 @@
++.\" **************************************************************************
++.\" *                                  _   _ ____  _
++.\" *  Project                     ___| | | |  _ \| |
++.\" *                             / __| | | | |_) | |
++.\" *                            | (__| |_| |  _ <| |___
++.\" *                             \___|\___/|_| \_\_____|
++.\" *
++.\" * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" *
++.\" * This software is licensed as described in the file COPYING, which
++.\" * you should have received as part of this distribution. The terms
++.\" * are also available at https://curl.haxx.se/docs/copyright.html.
++.\" *
++.\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++.\" * copies of the Software, and permit persons to whom the Software is
++.\" * furnished to do so, under the terms of the COPYING file.
++.\" *
++.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++.\" * KIND, either express or implied.
++.\" *
++.\" **************************************************************************
++.TH curl_url_set 3 "6 Aug 2018" "libcurl" "libcurl Manual"
++.SH NAME
++curl_url_set - set a part from a URL
++.SH SYNOPSIS
++.B #include <curl/curl.h>
++
++CURLUcode curl_url_set(CURLU *url,
++                       CURLUPart part,
++                       const char *content,
++                       unsigned int flags)
++.fi
++.SH DESCRIPTION
++Given the \fIurl\fP handle of an already parsed URL, this function lets the
++user set/update individual pieces of it.
++
++The \fIpart\fP argument should identify the particular URL part (see list
++below) to set or change, with \fIcontent\fP pointing to a zero terminated
++string with the new contents for that URL part. The contents should be in the
++form and encoding they'd use in a URL: URL encoded.
++
++Setting a part to a NULL pointer will effectively remove that part's contents
++from the CURLU handle.
++
++The \fIflags\fP argument is a bitmask with independent features.
++.SH PARTS
++.IP CURLUPART_URL
++Allows the full URL of the handle to be replaced. If the handle already is
++populated with a URL, the new URL can be relative to the previous.
++
++When successfully setting a new URL, relative or absolute, the handle contents
++will be replaced with the information of the newly set URL.
++
++Pass a pointer to a zero terminated string to the \fIurl\fP parameter. The
++string must point to a correctly formatted "RFC 3986+" URL or be a NULL
++pointer.
++.IP CURLUPART_SCHEME
++Scheme cannot be URL decoded on set.
++.IP CURLUPART_USER
++.IP CURLUPART_PASSWORD
++.IP CURLUPART_OPTIONS
++.IP CURLUPART_HOST
++The host name can use IDNA. The string must then be encoded as your locale
++says or UTF-8 (when winidn is used).
++.IP CURLUPART_PORT
++Port cannot be URL encoded on set.
++.IP CURLUPART_PATH
++If a path is set in the URL without a leading slash, a slash will be inserted
++automatically when this URL is read from the handle.
++.IP CURLUPART_QUERY
++The query part will also get spaces converted to pluses when asked to URL
++encode on set with the CURLU_URLENCODE bit.
++
++If used together with \fICURLU_APPENDQUERY\fP, the provided part will be appended on
++the end of the existing query - and if the previous part didn't end with an
++ampersand (&), an ampersand will be inserted before the new appended part.
++
++When \fICURLU_APPENDQUERY\fP is used together with \fICURLU_URLENCODE\fP,
++the '=' symbols will not be URL encoded.
++
++The question mark in the URL is not part of the actual query contents.
++.IP CURLUPART_FRAGMENT
++The hash sign in the URL is not part of the actual fragment contents.
++.SH FLAGS
++The flags argument is zero, one or more bits set in a bitmask.
++.IP CURLU_NON_SUPPORT_SCHEME
++If set, allows \fIcurl_url_set(3)\fP to set a non-supported scheme.
++.IP CURLU_URLENCODE
++When set, \fIcurl_url_set(3)\fP URL encodes the part on entry, except for
++scheme, port and URL.
++
++When setting the path component with URL encoding enabled, the slash character
++will be skipped.
++
++The query part gets space-to-plus conversion before the URL conversion.
++
++This URL encoding is charset unaware and will convert the input on a
++byte-by-byte manner.
++.SH RETURN VALUE
++Returns a CURLUcode error value, which is CURLUE_OK (0) if everything went
++fine.
++
++If this function returns an error, no URL part is returned.
++.SH EXAMPLE
++.nf
++  CURLUcode rc;
++  CURLU *url = curl_url();
++  rc = curl_url_set(url, CURLUPART_URL, "https://example.com", 0);
++  if(!rc) {
++    char *scheme;
++    /* change it to an FTP URL */
++    rc = curl_url_set(url, CURLUPART_SCHEME, "ftp", 0);
++  }
++  curl_url_cleanup(url);
++.fi
++.SH AVAILABILITY
++Added in curl 7.63.0
++.SH "SEE ALSO"
++.BR curl_url_cleanup "(3), " curl_url "(3), " curl_url_get "(3), "
++.BR curl_url_dup "(3), "
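The page above documents CURLU_APPENDQUERY and CURLU_URLENCODE only in prose. A short usage sketch of the two flags together, built solely on the API this patch introduces; the expected output in the comment is an assumption derived from the documented space-to-plus and '=' handling:

    #include <curl/curl.h>
    #include <stdio.h>

    /* Sketch: build a query string piece by piece with CURLU_APPENDQUERY.
     * With CURLU_URLENCODE the value bytes get encoded and spaces become
     * pluses, while the '=' separator is left alone, as documented above. */
    int main(void)
    {
      CURLU *u = curl_url();
      char *full = NULL;

      curl_url_set(u, CURLUPART_URL, "https://example.com/search", 0);
      curl_url_set(u, CURLUPART_QUERY, "q=two words",
                   CURLU_APPENDQUERY | CURLU_URLENCODE);
      curl_url_set(u, CURLUPART_QUERY, "page=2", CURLU_APPENDQUERY);

      if(!curl_url_get(u, CURLUPART_URL, &full, 0)) {
        printf("%s\n", full);  /* expected: https://example.com/search?q=two+words&page=2 */
        curl_free(full);
      }
      curl_url_cleanup(u);
      return 0;
    }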
+diff --git a/docs/libcurl/symbols-in-versions b/docs/libcurl/symbols-in-versions
+index 7448b4f..c797cb7 100644
+--- a/docs/libcurl/symbols-in-versions
++++ b/docs/libcurl/symbols-in-versions
+@@ -718,6 +718,36 @@ CURLSSLSET_NO_BACKENDS          7.56.0
+ CURLSSLSET_OK                   7.56.0
+ CURLSSLSET_TOO_LATE             7.56.0
+ CURLSSLSET_UNKNOWN_BACKEND      7.56.0
++CURLUPART_FRAGMENT              7.62.0
++CURLUPART_HOST                  7.62.0
++CURLUPART_OPTIONS               7.62.0
++CURLUPART_PASSWORD              7.62.0
++CURLUPART_PATH                  7.62.0
++CURLUPART_PORT                  7.62.0
++CURLUPART_QUERY                 7.62.0
++CURLUPART_SCHEME                7.62.0
++CURLUPART_URL                   7.62.0
++CURLUPART_USER                  7.62.0
++CURLUE_BAD_HANDLE               7.62.0
++CURLUE_BAD_PARTPOINTER          7.62.0
++CURLUE_BAD_PORT_NUMBER          7.62.0
++CURLUE_MALFORMED_INPUT          7.62.0
++CURLUE_NO_FRAGMENT              7.62.0
++CURLUE_NO_HOST                  7.62.0
++CURLUE_NO_OPTIONS               7.62.0
++CURLUE_NO_PASSWORD              7.62.0
++CURLUE_NO_PATH                  7.62.0
++CURLUE_NO_PORT                  7.62.0
++CURLUE_NO_QUERY                 7.62.0
++CURLUE_NO_SCHEME                7.62.0
++CURLUE_NO_USER                  7.62.0
++CURLUE_OK                       7.62.0
++CURLUE_OUT_OF_MEMORY            7.62.0
++CURLUE_RELATIVE                 7.62.0
++CURLUE_UNKNOWN_PART             7.62.0
++CURLUE_UNSUPPORTED_SCHEME       7.62.0
++CURLUE_URLDECODE                7.62.0
++CURLUE_USER_NOT_ALLOWED         7.62.0
+ CURLUSESSL_ALL                  7.17.0
+ CURLUSESSL_CONTROL              7.17.0
+ CURLUSESSL_NONE                 7.17.0
+diff --git a/include/curl/Makefile.am b/include/curl/Makefile.am
+index 989d4a2..bf5f061 100644
+--- a/include/curl/Makefile.am
++++ b/include/curl/Makefile.am
+@@ -5,7 +5,7 @@
+ #                            | (__| |_| |  _ <| |___
+ #                             \___|\___/|_| \_\_____|
+ #
+-# Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
++# Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+ #
+ # This software is licensed as described in the file COPYING, which
+ # you should have received as part of this distribution. The terms
+@@ -21,7 +21,7 @@
+ ###########################################################################
+ pkginclude_HEADERS = \
+ 	curl.h curlver.h easy.h mprintf.h stdcheaders.h multi.h \
+-	typecheck-gcc.h system.h
++	typecheck-gcc.h system.h urlapi.h
+ 
+ pkgincludedir= $(includedir)/curl
+ 
+diff --git a/include/curl/curl.h b/include/curl/curl.h
+index 067b34d..8f473e2 100644
+--- a/include/curl/curl.h
++++ b/include/curl/curl.h
+@@ -2779,6 +2779,7 @@ CURL_EXTERN CURLcode curl_easy_pause(CURL *handle, int bitmask);
+   stuff before they can be included! */
+ #include "easy.h" /* nothing in curl is fun without the easy stuff */
+ #include "multi.h"
++#include "urlapi.h"
+ 
+ /* the typechecker doesn't work in C++ (yet) */
+ #if defined(__GNUC__) && defined(__GNUC_MINOR__) && \
+diff --git a/include/curl/urlapi.h b/include/curl/urlapi.h
+new file mode 100644
+index 0000000..b16cfce
+--- /dev/null
++++ b/include/curl/urlapi.h
+@@ -0,0 +1,121 @@
++#ifndef __CURL_URLAPI_H
++#define __CURL_URLAPI_H
++/***************************************************************************
++ *                                  _   _ ____  _
++ *  Project                     ___| | | |  _ \| |
++ *                             / __| | | | |_) | |
++ *                            | (__| |_| |  _ <| |___
++ *                             \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.haxx.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ ***************************************************************************/
++
++#ifdef  __cplusplus
++extern "C" {
++#endif
++
++/* the error codes for the URL API */
++typedef enum {
++  CURLUE_OK,
++  CURLUE_BAD_HANDLE,          /* 1 */
++  CURLUE_BAD_PARTPOINTER,     /* 2 */
++  CURLUE_MALFORMED_INPUT,     /* 3 */
++  CURLUE_BAD_PORT_NUMBER,     /* 4 */
++  CURLUE_UNSUPPORTED_SCHEME,  /* 5 */
++  CURLUE_URLDECODE,           /* 6 */
++  CURLUE_RELATIVE,            /* 7 */
++  CURLUE_USER_NOT_ALLOWED,    /* 8 */
++  CURLUE_UNKNOWN_PART,        /* 9 */
++  CURLUE_NO_SCHEME,           /* 10 */
++  CURLUE_NO_USER,             /* 11 */
++  CURLUE_NO_PASSWORD,         /* 12 */
++  CURLUE_NO_OPTIONS,          /* 13 */
++  CURLUE_NO_HOST,             /* 14 */
++  CURLUE_NO_PORT,             /* 15 */
++  CURLUE_NO_PATH,             /* 16 */
++  CURLUE_NO_QUERY,            /* 17 */
++  CURLUE_NO_FRAGMENT,         /* 18 */
++  CURLUE_OUT_OF_MEMORY        /* 19 */
++} CURLUcode;
++
++typedef enum {
++  CURLUPART_URL,
++  CURLUPART_SCHEME,
++  CURLUPART_USER,
++  CURLUPART_PASSWORD,
++  CURLUPART_OPTIONS,
++  CURLUPART_HOST,
++  CURLUPART_PORT,
++  CURLUPART_PATH,
++  CURLUPART_QUERY,
++  CURLUPART_FRAGMENT
++} CURLUPart;
++
++#define CURLU_DEFAULT_PORT (1<<0)       /* return default port number */
++#define CURLU_NO_DEFAULT_PORT (1<<1)    /* act as if no port number was set,
++                                           if the port number matches the
++                                           default for the scheme */
++#define CURLU_DEFAULT_SCHEME (1<<2)     /* return default scheme if
++                                           missing */
++#define CURLU_NON_SUPPORT_SCHEME (1<<3) /* allow non-supported scheme */
++#define CURLU_PATH_AS_IS (1<<4)         /* leave dot sequences */
++#define CURLU_DISALLOW_USER (1<<5)      /* no user+password allowed */
++#define CURLU_URLDECODE (1<<6)          /* URL decode on get */
++#define CURLU_URLENCODE (1<<7)          /* URL encode on set */
++#define CURLU_APPENDQUERY (1<<8)        /* append a form style part */
++
++typedef struct Curl_URL CURLU;
++
++/*
++ * curl_url() creates a new CURLU handle and returns a pointer to it.
++ * Must be freed with curl_url_cleanup().
++ */
++CURL_EXTERN CURLU *curl_url(void);
++
++/*
++ * curl_url_cleanup() frees the CURLU handle and related resources used for
++ * the URL parsing. It will not free strings previously returned with the URL
++ * API.
++ */
++CURL_EXTERN void curl_url_cleanup(CURLU *handle);
++
++/*
++ * curl_url_dup() duplicates a CURLU handle and returns a new copy. The new
++ * handle must also be freed with curl_url_cleanup().
++ */
++CURL_EXTERN CURLU *curl_url_dup(CURLU *in);
++
++/*
++ * curl_url_get() extracts a specific part of the URL from a CURLU
++ * handle. Returns error code. The returned pointer MUST be freed with
++ * curl_free() afterwards.
++ */
++CURL_EXTERN CURLUcode curl_url_get(CURLU *handle, CURLUPart what,
++                                   char **part, unsigned int flags);
++
++/*
++ * curl_url_set() sets a specific part of the URL in a CURLU handle. Returns
++ * error code. The passed in string will be copied. Passing a NULL instead of
++ * a part string, clears that part.
++ */
++CURL_EXTERN CURLUcode curl_url_set(CURLU *handle, CURLUPart what,
++                                   const char *part, unsigned int flags);
++
++
++#ifdef __cplusplus
++} /* end of extern "C" */
++#endif
++
++#endif
+diff --git a/lib/Makefile.inc b/lib/Makefile.inc
+index 76ca6d0..1ff82e1 100644
+--- a/lib/Makefile.inc
++++ b/lib/Makefile.inc
+@@ -54,7 +54,8 @@ LIB_CFILES = file.c timeval.c base64.c hostip.c progress.c formdata.c   \
+   http_ntlm.c curl_ntlm_wb.c curl_ntlm_core.c curl_sasl.c rand.c        \
+   curl_multibyte.c hostcheck.c conncache.c pipeline.c dotdot.c          \
+   x509asn1.c http2.c smb.c curl_endian.c curl_des.c system_win32.c      \
+-  mime.c sha256.c setopt.c curl_path.c curl_ctype.c curl_range.c psl.c
++  mime.c sha256.c setopt.c curl_path.c curl_ctype.c curl_range.c psl.c  \
++  urlapi.c
+ 
+ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
+   formdata.h cookie.h http.h sendf.h ftp.h url.h dict.h if2ip.h         \
+@@ -74,7 +75,7 @@ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
+   curl_setup_once.h multihandle.h setup-vms.h pipeline.h dotdot.h       \
+   x509asn1.h http2.h sigpipe.h smb.h curl_endian.h curl_des.h           \
+   curl_printf.h system_win32.h rand.h mime.h curl_sha256.h setopt.h     \
+-  curl_path.h curl_ctype.h curl_range.h psl.h
++  curl_path.h curl_ctype.h curl_range.h psl.h urlapi-int.h
+ 
+ LIB_RCFILES = libcurl.rc
+ 
+diff --git a/lib/escape.c b/lib/escape.c
+index 10774f0..afd3899 100644
+--- a/lib/escape.c
++++ b/lib/escape.c
+@@ -5,7 +5,7 @@
+  *                            | (__| |_| |  _ <| |___
+  *                             \___|\___/|_| \_\_____|
+  *
+- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+  *
+  * This software is licensed as described in the file COPYING, which
+  * you should have received as part of this distribution. The terms
+@@ -41,7 +41,7 @@
+    its behavior is altered by the current locale.
+    See https://tools.ietf.org/html/rfc3986#section-2.3
+ */
+-static bool Curl_isunreserved(unsigned char in)
++bool Curl_isunreserved(unsigned char in)
+ {
+   switch(in) {
+     case '0': case '1': case '2': case '3': case '4':
+@@ -141,6 +141,8 @@ char *curl_easy_escape(struct Curl_easy *data, const char *string,
+  * Returns a pointer to a malloced string in *ostring with length given in
+  * *olen. If length == 0, the length is assumed to be strlen(string).
+  *
++ * 'data' can be set to NULL but then this function can't convert network
++ * data to host for non-ascii.
+  */
+ CURLcode Curl_urldecode(struct Curl_easy *data,
+                         const char *string, size_t length,
+@@ -151,7 +153,7 @@ CURLcode Curl_urldecode(struct Curl_easy *data,
+   char *ns = malloc(alloc);
+   size_t strindex = 0;
+   unsigned long hex;
+-  CURLcode result;
++  CURLcode result = CURLE_OK;
+ 
+   if(!ns)
+     return CURLE_OUT_OF_MEMORY;
+@@ -171,11 +173,13 @@ CURLcode Curl_urldecode(struct Curl_easy *data,
+ 
+       in = curlx_ultouc(hex); /* this long is never bigger than 255 anyway */
+ 
+-      result = Curl_convert_from_network(data, (char *)&in, 1);
+-      if(result) {
+-        /* Curl_convert_from_network calls failf if unsuccessful */
+-        free(ns);
+-        return result;
++      if(data) {
++        result = Curl_convert_from_network(data, (char *)&in, 1);
++        if(result) {
++          /* Curl_convert_from_network calls failf if unsuccessful */
++          free(ns);
++          return result;
++        }
+       }
+ 
+       string += 2;
+diff --git a/lib/escape.h b/lib/escape.h
+index 638666f..666f1ad 100644
+--- a/lib/escape.h
++++ b/lib/escape.h
+@@ -7,7 +7,7 @@
+  *                            | (__| |_| |  _ <| |___
+  *                             \___|\___/|_| \_\_____|
+  *
+- * Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+  *
+  * This software is licensed as described in the file COPYING, which
+  * you should have received as part of this distribution. The terms
+@@ -24,6 +24,7 @@
+ /* Escape and unescape URL encoding in strings. The functions return a new
+  * allocated string or NULL if an error occurred.  */
+ 
++bool Curl_isunreserved(unsigned char in);
+ CURLcode Curl_urldecode(struct Curl_easy *data,
+                         const char *string, size_t length,
+                         char **ostring, size_t *olen,
+diff --git a/lib/imap.c b/lib/imap.c
+index 942fe7d..28962c1 100644
+--- a/lib/imap.c
++++ b/lib/imap.c
+@@ -159,7 +159,8 @@ const struct Curl_handler Curl_handler_imaps = {
+   ZERO_NULL,                        /* connection_check */
+   PORT_IMAPS,                       /* defport */
+   CURLPROTO_IMAPS,                  /* protocol */
+-  PROTOPT_CLOSEACTION | PROTOPT_SSL /* flags */
++  PROTOPT_CLOSEACTION | PROTOPT_SSL | /* flags */
++  PROTOPT_URLOPTIONS
+ };
+ #endif
+ 
+diff --git a/lib/transfer.c b/lib/transfer.c
+index 7159d5c..ecd1063 100644
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -75,6 +75,7 @@
+ #include "http2.h"
+ #include "mime.h"
+ #include "strcase.h"
++#include "urlapi-int.h"
+ 
+ /* The last 3 #include files should be in this order */
+ #include "curl_printf.h"
+@@ -1453,311 +1454,6 @@ CURLcode Curl_posttransfer(struct Curl_easy *data)
+   return CURLE_OK;
+ }
+ 
+-#ifndef CURL_DISABLE_HTTP
+-/*
+- * Find the separator at the end of the host name, or the '?' in cases like
+- * http://www.url.com?id=2380
+- */
+-static const char *find_host_sep(const char *url)
+-{
+-  const char *sep;
+-  const char *query;
+-
+-  /* Find the start of the hostname */
+-  sep = strstr(url, "//");
+-  if(!sep)
+-    sep = url;
+-  else
+-    sep += 2;
+-
+-  query = strchr(sep, '?');
+-  sep = strchr(sep, '/');
+-
+-  if(!sep)
+-    sep = url + strlen(url);
+-
+-  if(!query)
+-    query = url + strlen(url);
+-
+-  return sep < query ? sep : query;
+-}
+-
+-/*
+- * Decide in an encoding-independent manner whether a character in an
+- * URL must be escaped. The same criterion must be used in strlen_url()
+- * and strcpy_url().
+- */
+-static bool urlchar_needs_escaping(int c)
+-{
+-    return !(ISCNTRL(c) || ISSPACE(c) || ISGRAPH(c));
+-}
+-
+-/*
+- * strlen_url() returns the length of the given URL if the spaces within the
+- * URL were properly URL encoded.
+- * URL encoding should be skipped for host names, otherwise IDN resolution
+- * will fail.
+- */
+-static size_t strlen_url(const char *url, bool relative)
+-{
+-  const unsigned char *ptr;
+-  size_t newlen = 0;
+-  bool left = TRUE; /* left side of the ? */
+-  const unsigned char *host_sep = (const unsigned char *) url;
+-
+-  if(!relative)
+-    host_sep = (const unsigned char *) find_host_sep(url);
+-
+-  for(ptr = (unsigned char *)url; *ptr; ptr++) {
+-
+-    if(ptr < host_sep) {
+-      ++newlen;
+-      continue;
+-    }
+-
+-    switch(*ptr) {
+-    case '?':
+-      left = FALSE;
+-      /* FALLTHROUGH */
+-    default:
+-      if(urlchar_needs_escaping(*ptr))
+-        newlen += 2;
+-      newlen++;
+-      break;
+-    case ' ':
+-      if(left)
+-        newlen += 3;
+-      else
+-        newlen++;
+-      break;
+-    }
+-  }
+-  return newlen;
+-}
+-
+-/* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
+- * the source URL accordingly.
+- * URL encoding should be skipped for host names, otherwise IDN resolution
+- * will fail.
+- */
+-static void strcpy_url(char *output, const char *url, bool relative)
+-{
+-  /* we must add this with whitespace-replacing */
+-  bool left = TRUE;
+-  const unsigned char *iptr;
+-  char *optr = output;
+-  const unsigned char *host_sep = (const unsigned char *) url;
+-
+-  if(!relative)
+-    host_sep = (const unsigned char *) find_host_sep(url);
+-
+-  for(iptr = (unsigned char *)url;    /* read from here */
+-      *iptr;         /* until zero byte */
+-      iptr++) {
+-
+-    if(iptr < host_sep) {
+-      *optr++ = *iptr;
+-      continue;
+-    }
+-
+-    switch(*iptr) {
+-    case '?':
+-      left = FALSE;
+-      /* FALLTHROUGH */
+-    default:
+-      if(urlchar_needs_escaping(*iptr)) {
+-        snprintf(optr, 4, "%%%02x", *iptr);
+-        optr += 3;
+-      }
+-      else
+-        *optr++=*iptr;
+-      break;
+-    case ' ':
+-      if(left) {
+-        *optr++='%'; /* add a '%' */
+-        *optr++='2'; /* add a '2' */
+-        *optr++='0'; /* add a '0' */
+-      }
+-      else
+-        *optr++='+'; /* add a '+' here */
+-      break;
+-    }
+-  }
+-  *optr = 0; /* zero terminate output buffer */
+-
+-}
+-
+-/*
+- * Returns true if the given URL is absolute (as opposed to relative)
+- */
+-static bool is_absolute_url(const char *url)
+-{
+-  char prot[16]; /* URL protocol string storage */
+-  char letter;   /* used for a silly sscanf */
+-
+-  return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE;
+-}
+-
+-/*
+- * Concatenate a relative URL to a base URL making it absolute.
+- * URL-encodes any spaces.
+- * The returned pointer must be freed by the caller unless NULL
+- * (returns NULL on out of memory).
+- */
+-static char *concat_url(const char *base, const char *relurl)
+-{
+-  /***
+-   TRY to append this new path to the old URL
+-   to the right of the host part. Oh crap, this is doomed to cause
+-   problems in the future...
+-  */
+-  char *newest;
+-  char *protsep;
+-  char *pathsep;
+-  size_t newlen;
+-  bool host_changed = FALSE;
+-
+-  const char *useurl = relurl;
+-  size_t urllen;
+-
+-  /* we must make our own copy of the URL to play with, as it may
+-     point to read-only data */
+-  char *url_clone = strdup(base);
+-
+-  if(!url_clone)
+-    return NULL; /* skip out of this NOW */
+-
+-  /* protsep points to the start of the host name */
+-  protsep = strstr(url_clone, "//");
+-  if(!protsep)
+-    protsep = url_clone;
+-  else
+-    protsep += 2; /* pass the slashes */
+-
+-  if('/' != relurl[0]) {
+-    int level = 0;
+-
+-    /* First we need to find out if there's a ?-letter in the URL,
+-       and cut it and the right-side of that off */
+-    pathsep = strchr(protsep, '?');
+-    if(pathsep)
+-      *pathsep = 0;
+-
+-    /* we have a relative path to append to the last slash if there's one
+-       available, or if the new URL is just a query string (starts with a
+-       '?')  we append the new one at the end of the entire currently worked
+-       out URL */
+-    if(useurl[0] != '?') {
+-      pathsep = strrchr(protsep, '/');
+-      if(pathsep)
+-        *pathsep = 0;
+-    }
+-
+-    /* Check if there's any slash after the host name, and if so, remember
+-       that position instead */
+-    pathsep = strchr(protsep, '/');
+-    if(pathsep)
+-      protsep = pathsep + 1;
+-    else
+-      protsep = NULL;
+-
+-    /* now deal with one "./" or any amount of "../" in the newurl
+-       and act accordingly */
+-
+-    if((useurl[0] == '.') && (useurl[1] == '/'))
+-      useurl += 2; /* just skip the "./" */
+-
+-    while((useurl[0] == '.') &&
+-          (useurl[1] == '.') &&
+-          (useurl[2] == '/')) {
+-      level++;
+-      useurl += 3; /* pass the "../" */
+-    }
+-
+-    if(protsep) {
+-      while(level--) {
+-        /* cut off one more level from the right of the original URL */
+-        pathsep = strrchr(protsep, '/');
+-        if(pathsep)
+-          *pathsep = 0;
+-        else {
+-          *protsep = 0;
+-          break;
+-        }
+-      }
+-    }
+-  }
+-  else {
+-    /* We got a new absolute path for this server */
+-
+-    if((relurl[0] == '/') && (relurl[1] == '/')) {
+-      /* the new URL starts with //, just keep the protocol part from the
+-         original one */
+-      *protsep = 0;
+-      useurl = &relurl[2]; /* we keep the slashes from the original, so we
+-                              skip the new ones */
+-      host_changed = TRUE;
+-    }
+-    else {
+-      /* cut off the original URL from the first slash, or deal with URLs
+-         without slash */
+-      pathsep = strchr(protsep, '/');
+-      if(pathsep) {
+-        /* When people use badly formatted URLs, such as
+-           "http://www.url.com?dir=/home/daniel" we must not use the first
+-           slash, if there's a ?-letter before it! */
+-        char *sep = strchr(protsep, '?');
+-        if(sep && (sep < pathsep))
+-          pathsep = sep;
+-        *pathsep = 0;
+-      }
+-      else {
+-        /* There was no slash. Now, since we might be operating on a badly
+-           formatted URL, such as "http://www.url.com?id=2380" which doesn't
+-           use a slash separator as it is supposed to, we need to check for a
+-           ?-letter as well! */
+-        pathsep = strchr(protsep, '?');
+-        if(pathsep)
+-          *pathsep = 0;
+-      }
+-    }
+-  }
+-
+-  /* If the new part contains a space, this is a mighty stupid redirect
+-     but we still make an effort to do "right". To the left of a '?'
+-     letter we replace each space with %20 while it is replaced with '+'
+-     on the right side of the '?' letter.
+-  */
+-  newlen = strlen_url(useurl, !host_changed);
+-
+-  urllen = strlen(url_clone);
+-
+-  newest = malloc(urllen + 1 + /* possible slash */
+-                  newlen + 1 /* zero byte */);
+-
+-  if(!newest) {
+-    free(url_clone); /* don't leak this */
+-    return NULL;
+-  }
+-
+-  /* copy over the root url part */
+-  memcpy(newest, url_clone, urllen);
+-
+-  /* check if we need to append a slash */
+-  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
+-    ;
+-  else
+-    newest[urllen++]='/';
+-
+-  /* then append the new piece on the right side */
+-  strcpy_url(&newest[urllen], useurl, !host_changed);
+-
+-  free(url_clone);
+-
+-  return newest;
+-}
+-#endif /* CURL_DISABLE_HTTP */
+-
+ /*
+  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
+  * as given by the remote server and set up the new URL to request.
+@@ -1809,12 +1505,12 @@ CURLcode Curl_follow(struct Curl_easy *data,
+     }
+   }
+ 
+-  if(!is_absolute_url(newurl)) {
++  if(!Curl_is_absolute_url(newurl, NULL, 8)) {
+     /***
+      *DANG* this is an RFC 2068 violation. The URL is supposed
+      to be absolute and this doesn't seem to be that!
+      */
+-    char *absolute = concat_url(data->change.url, newurl);
++    char *absolute = Curl_concat_url(data->change.url, newurl);
+     if(!absolute)
+       return CURLE_OUT_OF_MEMORY;
+     newurl = absolute;
+@@ -1823,7 +1519,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
+     /* The new URL MAY contain space or high byte values, that means a mighty
+        stupid redirect URL but we still make an effort to do "right". */
+     char *newest;
+-    size_t newlen = strlen_url(newurl, FALSE);
++    size_t newlen = Curl_strlen_url(newurl, FALSE);
+ 
+     /* This is an absolute URL, don't allow the custom port number */
+     disallowport = TRUE;
+@@ -1832,7 +1528,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
+     if(!newest)
+       return CURLE_OUT_OF_MEMORY;
+ 
+-    strcpy_url(newest, newurl, FALSE); /* create a space-free URL */
++    Curl_strcpy_url(newest, newurl, FALSE); /* create a space-free URL */
+     newurl = newest; /* use this instead now */
+ 
+   }
+diff --git a/lib/url.c b/lib/url.c
+index dcc1ecc..4f75f11 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1939,30 +1939,37 @@ static struct connectdata *allocate_conn(struct Curl_easy *data)
+   return NULL;
+ }
+ 
+-static CURLcode findprotocol(struct Curl_easy *data,
+-                             struct connectdata *conn,
+-                             const char *protostr)
++/* returns the handler if the given scheme is built-in */
++const struct Curl_handler *Curl_builtin_scheme(const char *scheme)
+ {
+   const struct Curl_handler * const *pp;
+   const struct Curl_handler *p;
+-
+-  /* Scan protocol handler table and match against 'protostr' to set a few
+-     variables based on the URL. Now that the handler may be changed later
+-     when the protocol specific setup function is called. */
+-  for(pp = protocols; (p = *pp) != NULL; pp++) {
+-    if(strcasecompare(p->scheme, protostr)) {
++  /* Scan protocol handler table and match against 'scheme'. The handler may
++     be changed later when the protocol specific setup function is called. */
++  for(pp = protocols; (p = *pp) != NULL; pp++)
++    if(strcasecompare(p->scheme, scheme))
+       /* Protocol found in table. Check if allowed */
+-      if(!(data->set.allowed_protocols & p->protocol))
+-        /* nope, get out */
+-        break;
++      return p;
++  return NULL; /* not found */
++}
+ 
+-      /* it is allowed for "normal" request, now do an extra check if this is
+-         the result of a redirect */
+-      if(data->state.this_is_a_follow &&
+-         !(data->set.redir_protocols & p->protocol))
+-        /* nope, get out */
+-        break;
+ 
++static CURLcode findprotocol(struct Curl_easy *data,
++                             struct connectdata *conn,
++                             const char *protostr)
++{
++  const struct Curl_handler *p = Curl_builtin_scheme(protostr);
++
++  if(p && /* Protocol found in table. Check if allowed */
++     (data->set.allowed_protocols & p->protocol)) {
++
++    /* it is allowed for "normal" request, now do an extra check if this is
++       the result of a redirect */
++    if(data->state.this_is_a_follow &&
++       !(data->set.redir_protocols & p->protocol))
++      /* nope, get out */
++      ;
++    else {
+       /* Perform setup complement if some. */
+       conn->handler = conn->given = p;
+ 
+@@ -1971,7 +1978,6 @@ static CURLcode findprotocol(struct Curl_easy *data,
+     }
+   }
+ 
+-
+   /* The protocol was not found in the table, but we don't have to assign it
+      to anything since it is already assigned to a dummy-struct in the
+      create_conn() function when the connectdata struct is allocated. */
+diff --git a/lib/url.h b/lib/url.h
+index ef3ebf0..0034f82 100644
+--- a/lib/url.h
++++ b/lib/url.h
+@@ -69,6 +69,8 @@ void Curl_getoff_all_pipelines(struct Curl_easy *data,
+ 
+ void Curl_close_connections(struct Curl_easy *data);
+ 
++const struct Curl_handler *Curl_builtin_scheme(const char *scheme);
++
+ #define CURL_DEFAULT_PROXY_PORT 1080 /* default proxy port unless specified */
+ #define CURL_DEFAULT_HTTPS_PROXY_PORT 443 /* default https proxy port unless
+                                              specified */
+diff --git a/lib/escape.h b/lib/urlapi-int.h
+similarity index 66%
+copy from lib/escape.h
+copy to lib/urlapi-int.h
+index 638666f..7ac09fd 100644
+--- a/lib/escape.h
++++ b/lib/urlapi-int.h
+@@ -1,5 +1,5 @@
+-#ifndef HEADER_CURL_ESCAPE_H
+-#define HEADER_CURL_ESCAPE_H
++#ifndef HEADER_CURL_URLAPI_INT_H
++#define HEADER_CURL_URLAPI_INT_H
+ /***************************************************************************
+  *                                  _   _ ____  _
+  *  Project                     ___| | | |  _ \| |
+@@ -7,7 +7,7 @@
+  *                            | (__| |_| |  _ <| |___
+  *                             \___|\___/|_| \_\_____|
+  *
+- * Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+  *
+  * This software is licensed as described in the file COPYING, which
+  * you should have received as part of this distribution. The terms
+@@ -21,13 +21,9 @@
+  * KIND, either express or implied.
+  *
+  ***************************************************************************/
+-/* Escape and unescape URL encoding in strings. The functions return a new
+- * allocated string or NULL if an error occurred.  */
+-
+-CURLcode Curl_urldecode(struct Curl_easy *data,
+-                        const char *string, size_t length,
+-                        char **ostring, size_t *olen,
+-                        bool reject_crlf);
+-
+-#endif /* HEADER_CURL_ESCAPE_H */
+-
++#include "curl_setup.h"
++bool Curl_is_absolute_url(const char *url, char *scheme, size_t buflen);
++char *Curl_concat_url(const char *base, const char *relurl);
++size_t Curl_strlen_url(const char *url, bool relative);
++void Curl_strcpy_url(char *output, const char *url, bool relative);
++#endif
+diff --git a/lib/urlapi.c b/lib/urlapi.c
+new file mode 100644
+index 0000000..8287861
+--- /dev/null
++++ b/lib/urlapi.c
+@@ -0,0 +1,1315 @@
++/***************************************************************************
++ *                                  _   _ ____  _
++ *  Project                     ___| | | |  _ \| |
++ *                             / __| | | | |_) | |
++ *                            | (__| |_| |  _ <| |___
++ *                             \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.haxx.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ ***************************************************************************/
++
++#include "curl_setup.h"
++
++#include "urldata.h"
++#include "urlapi-int.h"
++#include "strcase.h"
++#include "dotdot.h"
++#include "url.h"
++#include "escape.h"
++#include "curl_ctype.h"
++
++/* The last 3 #include files should be in this order */
++#include "curl_printf.h"
++#include "curl_memory.h"
++#include "memdebug.h"
++
++/* Internal representation of CURLU. Point to URL-encoded strings. */
++struct Curl_URL {
++  char *scheme;
++  char *user;
++  char *password;
++  char *options; /* IMAP only? */
++  char *host;
++  char *port;
++  char *path;
++  char *query;
++  char *fragment;
++
++  char *scratch; /* temporary scratch area */
++  long portnum; /* the numerical version */
++};
++
++#define DEFAULT_SCHEME "https"
++
++/* the scheme is not URL encoded; the longest schemes libcurl supports are 6
++   letters */
++#define MAX_SCHEME_LEN 8
++
++static void free_urlhandle(struct Curl_URL *u)
++{
++  free(u->scheme);
++  free(u->user);
++  free(u->password);
++  free(u->options);
++  free(u->host);
++  free(u->port);
++  free(u->path);
++  free(u->query);
++  free(u->fragment);
++  free(u->scratch);
++}
++
++/* move the full contents of one handle onto another and
++   free the original */
++static void mv_urlhandle(struct Curl_URL *from,
++                         struct Curl_URL *to)
++{
++  free_urlhandle(to);
++  *to = *from;
++  free(from);
++}
++
++/*
++ * Find the separator at the end of the host name, or the '?' in cases like
++ * http://www.url.com?id=2380
++ */
++static const char *find_host_sep(const char *url)
++{
++  const char *sep;
++  const char *query;
++
++  /* Find the start of the hostname */
++  sep = strstr(url, "//");
++  if(!sep)
++    sep = url;
++  else
++    sep += 2;
++
++  query = strchr(sep, '?');
++  sep = strchr(sep, '/');
++
++  if(!sep)
++    sep = url + strlen(url);
++
++  if(!query)
++    query = url + strlen(url);
++
++  return sep < query ? sep : query;
++}
++
++/*
++ * Decide in an encoding-independent manner whether a character in a
++ * URL must be escaped. The same criterion must be used in strlen_url()
++ * and strcpy_url().
++ */
++static bool urlchar_needs_escaping(int c)
++{
++  return !(ISCNTRL(c) || ISSPACE(c) || ISGRAPH(c));
++}
++
++/*
++ * strlen_url() returns the length of the given URL if the spaces within the
++ * URL were properly URL encoded.
++ * URL encoding should be skipped for host names, otherwise IDN resolution
++ * will fail.
++ */
++size_t Curl_strlen_url(const char *url, bool relative)
++{
++  const unsigned char *ptr;
++  size_t newlen = 0;
++  bool left = TRUE; /* left side of the ? */
++  const unsigned char *host_sep = (const unsigned char *) url;
++
++  if(!relative)
++    host_sep = (const unsigned char *) find_host_sep(url);
++
++  for(ptr = (unsigned char *)url; *ptr; ptr++) {
++
++    if(ptr < host_sep) {
++      ++newlen;
++      continue;
++    }
++
++    switch(*ptr) {
++    case '?':
++      left = FALSE;
++      /* FALLTHROUGH */
++    default:
++      if(urlchar_needs_escaping(*ptr))
++        newlen += 2;
++      newlen++;
++      break;
++    case ' ':
++      if(left)
++        newlen += 3;
++      else
++        newlen++;
++      break;
++    }
++  }
++  return newlen;
++}
++
++/* strcpy_url() copies a URL to an output buffer and URL-encodes the spaces in
++ * the source URL accordingly.
++ * URL encoding should be skipped for host names, otherwise IDN resolution
++ * will fail.
++ */
++void Curl_strcpy_url(char *output, const char *url, bool relative)
++{
++  /* copy the URL, replacing whitespace as we go */
++  bool left = TRUE;
++  const unsigned char *iptr;
++  char *optr = output;
++  const unsigned char *host_sep = (const unsigned char *) url;
++
++  if(!relative)
++    host_sep = (const unsigned char *) find_host_sep(url);
++
++  for(iptr = (unsigned char *)url;    /* read from here */
++      *iptr;         /* until zero byte */
++      iptr++) {
++
++    if(iptr < host_sep) {
++      *optr++ = *iptr;
++      continue;
++    }
++
++    switch(*iptr) {
++    case '?':
++      left = FALSE;
++      /* FALLTHROUGH */
++    default:
++      if(urlchar_needs_escaping(*iptr)) {
++        snprintf(optr, 4, "%%%02x", *iptr);
++        optr += 3;
++      }
++      else
++        *optr++=*iptr;
++      break;
++    case ' ':
++      if(left) {
++        *optr++='%'; /* add a '%' */
++        *optr++='2'; /* add a '2' */
++        *optr++='0'; /* add a '0' */
++      }
++      else
++        *optr++='+'; /* add a '+' here */
++      break;
++    }
++  }
++  *optr = 0; /* zero terminate output buffer */
++
++}
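++
++/* A minimal sketch of how the two helpers above are meant to be paired by a
++ * caller (Curl_follow() in lib/transfer.c does exactly this): measure first,
++ * then allocate, then copy. The 'location' input below is made up.
++ *
++ *   const char *location = "/redirect target with spaces";
++ *   size_t len = Curl_strlen_url(location, FALSE);
++ *   char *clean = malloc(len + 1);
++ *   if(clean) {
++ *     Curl_strcpy_url(clean, location, FALSE);
++ *     // 'clean' now holds the space-free URL, spaces turned into %20 or '+'
++ *     free(clean);
++ *   }
++ */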
++
++/*
++ * Returns true if the given URL is absolute (as opposed to relative); only
++ * the first 'buflen' bytes are inspected. If TRUE and 'buf' is non-NULL, the
++ * lower-cased scheme is stored there, zero terminated.
++ */
++bool Curl_is_absolute_url(const char *url, char *buf, size_t buflen)
++{
++  size_t i;
++  for(i = 0; i < buflen && url[i]; ++i) {
++    char s = url[i];
++    if(s == ':') {
++      if(buf)
++        buf[i] = 0;
++      return TRUE;
++    }
++    /* RFC 3986 3.1 explains:
++      scheme      = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
++    */
++    else if(ISALNUM(s) || (s == '+') || (s == '-') || (s == '.') ) {
++      if(buf)
++        buf[i] = (char)TOLOWER(s);
++    }
++    else
++      break;
++  }
++  return FALSE;
++}
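++
++/* A small usage sketch for the function above; the inputs are illustrative
++ * and the buffer size matches the MAX_SCHEME_LEN used elsewhere in this file:
++ *
++ *   char scheme[MAX_SCHEME_LEN];
++ *   if(Curl_is_absolute_url("HTTPS://example.com/x", scheme, sizeof(scheme)))
++ *     printf("absolute, scheme: %s\n", scheme);   // prints "https"
++ *   if(!Curl_is_absolute_url("/just/a/path", NULL, MAX_SCHEME_LEN))
++ *     printf("relative reference\n");
++ */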
++
++/*
++ * Concatenate a relative URL to a base URL making it absolute.
++ * URL-encodes any spaces.
++ * The returned pointer must be freed by the caller unless NULL
++ * (returns NULL on out of memory).
++ */
++char *Curl_concat_url(const char *base, const char *relurl)
++{
++  /***
++   TRY to append this new path to the old URL
++   to the right of the host part. Oh crap, this is doomed to cause
++   problems in the future...
++  */
++  char *newest;
++  char *protsep;
++  char *pathsep;
++  size_t newlen;
++  bool host_changed = FALSE;
++
++  const char *useurl = relurl;
++  size_t urllen;
++
++  /* we must make our own copy of the URL to play with, as it may
++     point to read-only data */
++  char *url_clone = strdup(base);
++
++  if(!url_clone)
++    return NULL; /* skip out of this NOW */
++
++  /* protsep points to the start of the host name */
++  protsep = strstr(url_clone, "//");
++  if(!protsep)
++    protsep = url_clone;
++  else
++    protsep += 2; /* pass the slashes */
++
++  if('/' != relurl[0]) {
++    int level = 0;
++
++    /* First we need to find out if there's a ?-letter in the URL,
++       and cut it and the right-side of that off */
++    pathsep = strchr(protsep, '?');
++    if(pathsep)
++      *pathsep = 0;
++
++    /* we have a relative path to append to the last slash if there's one
++       available, or if the new URL is just a query string (starts with a
++       '?')  we append the new one at the end of the entire currently worked
++       out URL */
++    if(useurl[0] != '?') {
++      pathsep = strrchr(protsep, '/');
++      if(pathsep)
++        *pathsep = 0;
++    }
++
++    /* Check if there's any slash after the host name, and if so, remember
++       that position instead */
++    pathsep = strchr(protsep, '/');
++    if(pathsep)
++      protsep = pathsep + 1;
++    else
++      protsep = NULL;
++
++    /* now deal with one "./" or any amount of "../" in the newurl
++       and act accordingly */
++
++    if((useurl[0] == '.') && (useurl[1] == '/'))
++      useurl += 2; /* just skip the "./" */
++
++    while((useurl[0] == '.') &&
++          (useurl[1] == '.') &&
++          (useurl[2] == '/')) {
++      level++;
++      useurl += 3; /* pass the "../" */
++    }
++
++    if(protsep) {
++      while(level--) {
++        /* cut off one more level from the right of the original URL */
++        pathsep = strrchr(protsep, '/');
++        if(pathsep)
++          *pathsep = 0;
++        else {
++          *protsep = 0;
++          break;
++        }
++      }
++    }
++  }
++  else {
++    /* We got a new absolute path for this server */
++
++    if((relurl[0] == '/') && (relurl[1] == '/')) {
++      /* the new URL starts with //, just keep the protocol part from the
++         original one */
++      *protsep = 0;
++      useurl = &relurl[2]; /* we keep the slashes from the original, so we
++                              skip the new ones */
++      host_changed = TRUE;
++    }
++    else {
++      /* cut off the original URL from the first slash, or deal with URLs
++         without slash */
++      pathsep = strchr(protsep, '/');
++      if(pathsep) {
++        /* When people use badly formatted URLs, such as
++           "http://www.url.com?dir=/home/daniel" we must not use the first
++           slash, if there's a ?-letter before it! */
++        char *sep = strchr(protsep, '?');
++        if(sep && (sep < pathsep))
++          pathsep = sep;
++        *pathsep = 0;
++      }
++      else {
++        /* There was no slash. Now, since we might be operating on a badly
++           formatted URL, such as "http://www.url.com?id=2380" which doesn't
++           use a slash separator as it is supposed to, we need to check for a
++           ?-letter as well! */
++        pathsep = strchr(protsep, '?');
++        if(pathsep)
++          *pathsep = 0;
++      }
++    }
++  }
++
++  /* If the new part contains a space, this is a mighty stupid redirect
++     but we still make an effort to do "right". To the left of a '?'
++     letter we replace each space with %20 while it is replaced with '+'
++     on the right side of the '?' letter.
++  */
++  newlen = Curl_strlen_url(useurl, !host_changed);
++
++  urllen = strlen(url_clone);
++
++  newest = malloc(urllen + 1 + /* possible slash */
++                  newlen + 1 /* zero byte */);
++
++  if(!newest) {
++    free(url_clone); /* don't leak this */
++    return NULL;
++  }
++
++  /* copy over the root url part */
++  memcpy(newest, url_clone, urllen);
++
++  /* check if we need to append a slash */
++  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
++    ;
++  else
++    newest[urllen++]='/';
++
++  /* then append the new piece on the right side */
++  Curl_strcpy_url(&newest[urllen], useurl, !host_changed);
++
++  free(url_clone);
++
++  return newest;
++}
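++
++/* A rough sketch of the intended behaviour, with made-up inputs; this mirrors
++ * how Curl_follow() resolves a Location: header against the current URL:
++ *
++ *   char *abs = Curl_concat_url("http://example.com/dir/page", "other.html");
++ *   // expected: "http://example.com/dir/other.html"
++ *   free(abs);
++ *   abs = Curl_concat_url("http://example.com/dir/page", "//elsewhere.com/x");
++ *   // expected: "http://elsewhere.com/x" (scheme kept, host replaced)
++ *   free(abs);
++ */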
++
++/*
++ * parse_hostname_login()
++ *
++ * Parse the login details (user name, password and options) from the URL and
++ * strip them out of the host name
++ *
++ */
++static CURLUcode parse_hostname_login(struct Curl_URL *u,
++                                      const struct Curl_handler *h,
++                                      char **hostname,
++                                      unsigned int flags)
++{
++  CURLUcode result = CURLUE_OK;
++  CURLcode ccode;
++  char *userp = NULL;
++  char *passwdp = NULL;
++  char *optionsp = NULL;
++
++  /* At this point, we're hoping all the other special cases have
++   * been taken care of, so conn->host.name is at most
++   *    [user[:password][;options]]@]hostname
++   *
++   * We need somewhere to put the embedded details, so do that first.
++   */
++
++  char *ptr = strchr(*hostname, '@');
++  char *login = *hostname;
++
++  if(!ptr)
++    goto out;
++
++  /* We will now try to extract the
++   * possible login information in a string like:
++   * ftp://user:password@ftp.my.site:8021/README */
++  *hostname = ++ptr;
++
++  /* We could use the login information in the URL so extract it. Only parse
++     options if the handler says we should. */
++  ccode = Curl_parse_login_details(login, ptr - login - 1,
++                                   &userp, &passwdp,
++                                   h->flags & PROTOPT_URLOPTIONS ?
++                                   &optionsp:NULL);
++  if(ccode) {
++    result = CURLUE_MALFORMED_INPUT;
++    goto out;
++  }
++
++  if(userp) {
++    if(flags & CURLU_DISALLOW_USER) {
++      /* Option DISALLOW_USER is set and url contains username. */
++      result = CURLUE_USER_NOT_ALLOWED;
++      goto out;
++    }
++
++    u->user = userp;
++  }
++
++  if(passwdp)
++    u->password = passwdp;
++
++  if(optionsp)
++    u->options = optionsp;
++
++  return CURLUE_OK;
++  out:
++
++  free(userp);
++  free(passwdp);
++  free(optionsp);
++
++  return result;
++}
++
++static CURLUcode parse_port(struct Curl_URL *u, char *hostname)
++{
++  char *portptr;
++  char endbracket;
++  int len;
++
++  if((1 == sscanf(hostname, "[%*45[0123456789abcdefABCDEF:.]%c%n",
++                  &endbracket, &len)) &&
++     (']' == endbracket)) {
++    /* this is a RFC2732-style specified IP-address */
++    portptr = &hostname[len];
++    if (*portptr != ':')
++      return CURLUE_MALFORMED_INPUT;
++  }
++  else
++    portptr = strchr(hostname, ':');
++
++  if(portptr) {
++    char *rest;
++    long port;
++    char portbuf[7];
++
++    if(!ISDIGIT(portptr[1]))
++      return CURLUE_BAD_PORT_NUMBER;
++
++    port = strtol(portptr + 1, &rest, 10);  /* Port number must be decimal */
++
++    if((port <= 0) || (port > 0xffff))
++      /* Single unix standard says port numbers are 16 bits long, but we don't
++         treat port zero as OK. */
++      return CURLUE_BAD_PORT_NUMBER;
++
++    if(rest[0])
++      return CURLUE_BAD_PORT_NUMBER;
++
++    if(rest != &portptr[1]) {
++      *portptr++ = '\0'; /* cut off the name there */
++      *rest = 0;
++      /* generate a new string to get rid of leading zeroes etc */
++      snprintf(portbuf, sizeof(portbuf), "%ld", port);
++      u->portnum = port;
++      u->port = strdup(portbuf);
++      if(!u->port)
++        return CURLUE_OUT_OF_MEMORY;
++    }
++    else {
++      /* Browser behavior adaptation. If there's a colon with no digits after,
++         just cut off the name there which makes us ignore the colon and just
++         use the default port. Firefox and Chrome both do that. */
++      *portptr = '\0';
++    }
++  }
++
++  return CURLUE_OK;
++}
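++
++/* Sketch of what the parser above produces, reachable through the public API
++ * (the URL below is only an example):
++ *
++ *   CURLU *h = curl_url();
++ *   char *port = NULL;
++ *   curl_url_set(h, CURLUPART_URL, "https://[::1]:1234/path", 0);
++ *   if(!curl_url_get(h, CURLUPART_PORT, &port, 0))
++ *     printf("port: %s\n", port);   // expected "1234"
++ *   curl_free(port);
++ *   curl_url_cleanup(h);
++ *
++ * A port written with leading zeroes, such as ":0080", is normalized to "80".
++ */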
++
++/* scan for byte values < 32 or 127 */
++static CURLUcode junkscan(char *part)
++{
++  char badbytes[]={
++    /* */ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++    0x7f,
++    0x00 /* zero terminate */
++  };
++  if(part) {
++    size_t n = strlen(part);
++    size_t nfine = strcspn(part, badbytes);
++    if(nfine != n)
++      /* since we don't know which part is scanned, return a generic error
++         code */
++      return CURLUE_MALFORMED_INPUT;
++  }
++  return CURLUE_OK;
++}
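++
++/* Sketch: a single control byte anywhere in a part fails the whole parse,
++ * e.g. (illustrative):
++ *
++ *   junkscan((char *)"index\rhtml");   // returns CURLUE_MALFORMED_INPUT
++ *   junkscan((char *)"index.html");    // returns CURLUE_OK
++ */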
++
++static CURLUcode hostname_check(char *hostname, unsigned int flags)
++{
++  const char *l; /* accepted characters */
++  size_t len;
++  size_t hlen = strlen(hostname);
++  (void)flags;
++
++  if(hostname[0] == '[') {
++    hostname++;
++    l = "0123456789abcdefABCDEF::.";
++    hlen -= 2;
++  }
++  else /* % for URL escaped letters */
++    l = "0123456789abcdefghijklimnopqrstuvwxyz-_.ABCDEFGHIJKLIMNOPQRSTUVWXYZ%";
++
++  len = strspn(hostname, l);
++  if(hlen != len)
++    /* hostname with bad content */
++    return CURLUE_MALFORMED_INPUT;
++
++  return CURLUE_OK;
++}
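++
++/* Sketch: the check above is a plain character allow-list, so for example
++ * (illustrative inputs):
++ *
++ *   hostname_check((char *)"[::1]", 0);         // CURLUE_OK
++ *   hostname_check((char *)"ex ample.com", 0);  // CURLUE_MALFORMED_INPUT
++ */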
++
++#define HOSTNAME_END(x) (((x) == '/') || ((x) == '?') || ((x) == '#'))
++
++static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
++{
++  char *path;
++  bool path_alloced = FALSE;
++  char *hostname;
++  char *query = NULL;
++  char *fragment = NULL;
++  CURLUcode result;
++  bool url_has_scheme = FALSE;
++  char schemebuf[MAX_SCHEME_LEN];
++  char *schemep;
++  size_t schemelen = 0;
++  size_t urllen;
++  const struct Curl_handler *h = NULL;
++
++  if(!url)
++    return CURLUE_MALFORMED_INPUT;
++
++  /*************************************************************
++   * Parse the URL.
++   ************************************************************/
++  /* allocate scratch area */
++  urllen = strlen(url);
++  path = u->scratch = malloc(urllen * 2 + 2);
++  if(!path)
++    return CURLUE_OUT_OF_MEMORY;
++
++  hostname = &path[urllen + 1];
++  hostname[0] = 0;
++
++  /* MSDOS/Windows style drive prefix, eg c: in c:foo */
++#define STARTS_WITH_DRIVE_PREFIX(str) \
++  ((('a' <= str[0] && str[0] <= 'z') || \
++    ('A' <= str[0] && str[0] <= 'Z')) && \
++   (str[1] == ':'))
++
++  /* MSDOS/Windows style drive prefix, optionally with
++   * a '|' instead of ':', followed by a slash or NUL */
++#define STARTS_WITH_URL_DRIVE_PREFIX(str) \
++  ((('a' <= (str)[0] && (str)[0] <= 'z') || \
++    ('A' <= (str)[0] && (str)[0] <= 'Z')) && \
++   ((str)[1] == ':' || (str)[1] == '|') && \
++   ((str)[2] == '/' || (str)[2] == '\\' || (str)[2] == 0))
++
++  if(Curl_is_absolute_url(url, schemebuf, sizeof(schemebuf))) {
++    url_has_scheme = TRUE;
++    schemelen = strlen(schemebuf);
++  }
++
++  /* handle the file: scheme */
++  if(url_has_scheme && strcasecompare(schemebuf, "file")) {
++    /* path has been allocated large enough to hold this */
++    strcpy(path, &url[5]);
++
++    hostname = NULL; /* no host for file: URLs */
++    u->scheme = strdup("file");
++    if(!u->scheme)
++      return CURLUE_OUT_OF_MEMORY;
++
++    /* Extra handling URLs with an authority component (i.e. that start with
++     * "file://")
++     *
++     * We allow omitted hostname (e.g. file:/<path>) -- valid according to
++     * RFC 8089, but not the (current) WHATWG URL spec.
++     */
++    if(path[0] == '/' && path[1] == '/') {
++      /* swallow the two slashes */
++      char *ptr = &path[2];
++
++      /*
++       * According to RFC 8089, a file: URL can be reliably dereferenced if:
++       *
++       *  o it has no/blank hostname, or
++       *
++       *  o the hostname matches "localhost" (case-insensitively), or
++       *
++       *  o the hostname is a FQDN that resolves to this machine.
++       *
++       * For brevity, we only consider URLs with empty, "localhost", or
++       * "127.0.0.1" hostnames as local.
++       *
++       * Additionally, there is an exception for URLs with a Windows drive
++       * letter in the authority (which was accidentally omitted from RFC 8089
++       * Appendix E, but believe me, it was meant to be there. --MK)
++       */
++      if(ptr[0] != '/' && !STARTS_WITH_URL_DRIVE_PREFIX(ptr)) {
++        /* the URL includes a host name, it must match "localhost" or
++           "127.0.0.1" to be valid */
++        if(!checkprefix("localhost/", ptr) &&
++           !checkprefix("127.0.0.1/", ptr)) {
++          /* Invalid file://hostname/, expected localhost or 127.0.0.1 or
++             none */
++          return CURLUE_MALFORMED_INPUT;
++        }
++        ptr += 9; /* now points to the slash after the host */
++      }
++
++      path = ptr;
++    }
++
++#if !defined(MSDOS) && !defined(WIN32) && !defined(__CYGWIN__)
++    /* Don't allow Windows drive letters when not in Windows.
++     * This catches both "file:/c:" and "file:c:" */
++    if(('/' == path[0] && STARTS_WITH_URL_DRIVE_PREFIX(&path[1])) ||
++       STARTS_WITH_URL_DRIVE_PREFIX(path)) {
++      /* File drive letters are only accepted in MSDOS/Windows */
++      return CURLUE_MALFORMED_INPUT;
++    }
++#else
++    /* If the path starts with a slash and a drive letter, ditch the slash */
++    if('/' == path[0] && STARTS_WITH_URL_DRIVE_PREFIX(&path[1])) {
++      /* This cannot be done with strcpy, as the memory chunks overlap! */
++      memmove(path, &path[1], strlen(&path[1]) + 1);
++    }
++#endif
++
++  }
++  else {
++    /* clear path */
++    const char *p;
++    const char *hostp;
++    size_t len;
++    path[0] = 0;
++
++    if(url_has_scheme) {
++      int i = 0;
++      p = &url[schemelen + 1];
++      while(p && (*p == '/') && (i < 4)) {
++        p++;
++        i++;
++      }
++      if((i < 1) || (i>3))
++        /* less than one or more than three slashes */
++        return CURLUE_MALFORMED_INPUT;
++
++      schemep = schemebuf;
++      if(!Curl_builtin_scheme(schemep) &&
++         !(flags & CURLU_NON_SUPPORT_SCHEME))
++        return CURLUE_UNSUPPORTED_SCHEME;
++
++      if(junkscan(schemep))
++        return CURLUE_MALFORMED_INPUT;
++    }
++    else {
++      /* no scheme! */
++
++      if(!(flags & CURLU_DEFAULT_SCHEME))
++        return CURLUE_MALFORMED_INPUT;
++      schemep = (char *) DEFAULT_SCHEME;
++
++      /*
++       * The URL has no scheme, so try parsing it without one specified.
++       */
++      p = url;
++    }
++    hostp = p; /* host name starts here */
++
++    while(*p && !HOSTNAME_END(*p)) /* find end of host name */
++      p++;
++
++    len = p - hostp;
++    if(!len)
++      return CURLUE_MALFORMED_INPUT;
++
++    memcpy(hostname, hostp, len);
++    hostname[len] = 0;
++
++    len = strlen(p);
++    memcpy(path, p, len);
++    path[len] = 0;
++
++    u->scheme = strdup(schemep);
++    if(!u->scheme)
++      return CURLUE_OUT_OF_MEMORY;
++  }
++
++  /* if this is a known scheme, get some details */
++  h = Curl_builtin_scheme(u->scheme);
++
++  if(junkscan(path))
++    return CURLUE_MALFORMED_INPUT;
++
++  query = strchr(path, '?');
++  if(query)
++    *query++ = 0;
++
++  fragment = strchr(query?query:path, '#');
++  if(fragment)
++    *fragment++ = 0;
++
++  if(!path[0])
++    /* if there's no path set, unset */
++    path = NULL;
++  else if(!(flags & CURLU_PATH_AS_IS)) {
++    /* sanitise paths and remove ../ and ./ sequences according to RFC3986 */
++    char *newp = Curl_dedotdotify(path);
++    if(!newp)
++      return CURLUE_OUT_OF_MEMORY;
++
++    if(strcmp(newp, path)) {
++      /* if we got a new version */
++      path = newp;
++      path_alloced = TRUE;
++    }
++    else
++      free(newp);
++  }
++  if(path) {
++    u->path = path_alloced?path:strdup(path);
++    if(!u->path)
++      return CURLUE_OUT_OF_MEMORY;
++  }
++
++  if(hostname) {
++    /*
++     * Parse the login details and strip them out of the host name.
++     */
++    if(junkscan(hostname))
++      return CURLUE_MALFORMED_INPUT;
++
++    result = parse_hostname_login(u, h, &hostname, flags);
++    if(result)
++      return result;
++
++    result = parse_port(u, hostname);
++    if(result)
++      return result;
++
++    result = hostname_check(hostname, flags);
++    if(result)
++      return result;
++
++    u->host = strdup(hostname);
++    if(!u->host)
++      return CURLUE_OUT_OF_MEMORY;
++  }
++
++  if(query && query[0]) {
++    u->query = strdup(query);
++    if(!u->query)
++      return CURLUE_OUT_OF_MEMORY;
++  }
++  if(fragment && fragment[0]) {
++    u->fragment = strdup(fragment);
++    if(!u->fragment)
++      return CURLUE_OUT_OF_MEMORY;
++  }
++
++  free(u->scratch);
++  u->scratch = NULL;
++
++  return CURLUE_OK;
++}
++
++/*
++ * Parse the URL and set the relevant members of the Curl_URL struct.
++ */
++static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
++{
++  CURLUcode result = seturl(url, u, flags);
++  if(result) {
++    free_urlhandle(u);
++    memset(u, 0, sizeof(struct Curl_URL));
++  }
++  return result;
++}
++
++/*
++ * Create a fresh, empty URL handle.
++ */
++CURLU *curl_url(void)
++{
++  return calloc(sizeof(struct Curl_URL), 1);
++}
++
++void curl_url_cleanup(CURLU *u)
++{
++  if(u) {
++    free_urlhandle(u);
++    free(u);
++  }
++}
++
++#define DUP(dest, src, name)         \
++  if(src->name) {                    \
++    dest->name = strdup(src->name);  \
++    if(!dest->name)                  \
++      goto fail;                     \
++  }
++
++CURLU *curl_url_dup(CURLU *in)
++{
++  struct Curl_URL *u = calloc(sizeof(struct Curl_URL), 1);
++  if(u) {
++    DUP(u, in, scheme);
++    DUP(u, in, user);
++    DUP(u, in, password);
++    DUP(u, in, options);
++    DUP(u, in, host);
++    DUP(u, in, port);
++    DUP(u, in, path);
++    DUP(u, in, query);
++    DUP(u, in, fragment);
++    u->portnum = in->portnum;
++  }
++  return u;
++  fail:
++  curl_url_cleanup(u);
++  return NULL;
++}
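++
++/* Duplication sketch (illustrative): the copy gets its own allocations for
++ * every part plus the numeric port, so it can be changed independently:
++ *
++ *   CURLU *orig = curl_url();
++ *   curl_url_set(orig, CURLUPART_URL, "http://example.com/a?b=c", 0);
++ *   CURLU *copy = curl_url_dup(orig);
++ *   curl_url_set(copy, CURLUPART_HOST, "example.org", 0);
++ *   // 'orig' still refers to example.com, 'copy' now to example.org
++ *   curl_url_cleanup(copy);
++ *   curl_url_cleanup(orig);
++ */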
++
++CURLUcode curl_url_get(CURLU *u, CURLUPart what,
++                       char **part, unsigned int flags)
++{
++  char *ptr;
++  CURLUcode ifmissing = CURLUE_UNKNOWN_PART;
++  char portbuf[7];
++  bool urldecode = (flags & CURLU_URLDECODE)?1:0;
++  bool plusdecode = FALSE;
++  (void)flags;
++  if(!u)
++    return CURLUE_BAD_HANDLE;
++  if(!part)
++    return CURLUE_BAD_PARTPOINTER;
++  *part = NULL;
++
++  switch(what) {
++  case CURLUPART_SCHEME:
++    ptr = u->scheme;
++    ifmissing = CURLUE_NO_SCHEME;
++    urldecode = FALSE; /* never for schemes */
++    break;
++  case CURLUPART_USER:
++    ptr = u->user;
++    ifmissing = CURLUE_NO_USER;
++    break;
++  case CURLUPART_PASSWORD:
++    ptr = u->password;
++    ifmissing = CURLUE_NO_PASSWORD;
++    break;
++  case CURLUPART_OPTIONS:
++    ptr = u->options;
++    ifmissing = CURLUE_NO_OPTIONS;
++    break;
++  case CURLUPART_HOST:
++    ptr = u->host;
++    ifmissing = CURLUE_NO_HOST;
++    break;
++  case CURLUPART_PORT:
++    ptr = u->port;
++    ifmissing = CURLUE_NO_PORT;
++    urldecode = FALSE; /* never for port */
++    if(!ptr && (flags & CURLU_DEFAULT_PORT) && u->scheme) {
++      /* there's no stored port number, but asked to deliver
++         a default one for the scheme */
++      const struct Curl_handler *h =
++        Curl_builtin_scheme(u->scheme);
++      if(h) {
++        snprintf(portbuf, sizeof(portbuf), "%ld", h->defport);
++        ptr = portbuf;
++      }
++    }
++    else if(ptr && u->scheme) {
++      /* there is a stored port number, but asked to inhibit if
++         it matches the default one for the scheme */
++      const struct Curl_handler *h =
++        Curl_builtin_scheme(u->scheme);
++      if(h && (h->defport == u->portnum) &&
++         (flags & CURLU_NO_DEFAULT_PORT))
++        ptr = NULL;
++    }
++    break;
++  case CURLUPART_PATH:
++    ptr = u->path;
++    if(!ptr) {
++      ptr = u->path = strdup("/");
++      if(!u->path)
++        return CURLUE_OUT_OF_MEMORY;
++    }
++    break;
++  case CURLUPART_QUERY:
++    ptr = u->query;
++    ifmissing = CURLUE_NO_QUERY;
++    plusdecode = urldecode;
++    break;
++  case CURLUPART_FRAGMENT:
++    ptr = u->fragment;
++    ifmissing = CURLUE_NO_FRAGMENT;
++    break;
++  case CURLUPART_URL: {
++    char *url;
++    char *scheme;
++    char *options = u->options;
++    char *port = u->port;
++    urldecode = FALSE; /* not for the whole thing */
++    if(u->scheme && strcasecompare("file", u->scheme)) {
++      url = aprintf("file://%s%s%s",
++                    u->path,
++                    u->fragment? "#": "",
++                    u->fragment? u->fragment : "");
++    }
++    else if(!u->host)
++      return CURLUE_NO_HOST;
++    else {
++      const struct Curl_handler *h = NULL;
++      if(u->scheme)
++        scheme = u->scheme;
++      else if(flags & CURLU_DEFAULT_SCHEME)
++        scheme = (char *) DEFAULT_SCHEME;
++      else
++        return CURLUE_NO_SCHEME;
++
++      if(scheme) {
++        h = Curl_builtin_scheme(scheme);
++        if(!port && (flags & CURLU_DEFAULT_PORT)) {
++          /* there's no stored port number, but asked to deliver
++             a default one for the scheme */
++          if(h) {
++            snprintf(portbuf, sizeof(portbuf), "%ld", h->defport);
++            port = portbuf;
++          }
++        }
++        else if(port) {
++          /* there is a stored port number, but asked to inhibit if it matches
++             the default one for the scheme */
++          if(h && (h->defport == u->portnum) &&
++             (flags & CURLU_NO_DEFAULT_PORT))
++            port = NULL;
++        }
++      }
++      if(h && !(h->flags & PROTOPT_URLOPTIONS))
++        options = NULL;
++
++      url = aprintf("%s://%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
++                    scheme,
++                    u->user ? u->user : "",
++                    u->password ? ":": "",
++                    u->password ? u->password : "",
++                    options ? ";" : "",
++                    options ? options : "",
++                    (u->user || u->password || options) ? "@": "",
++                    u->host,
++                    port ? ":": "",
++                    port ? port : "",
++                    (u->path && (u->path[0] != '/')) ? "/": "",
++                    u->path ? u->path : "/",
++                    u->query? "?": "",
++                    u->query? u->query : "",
++                    u->fragment? "#": "",
++                    u->fragment? u->fragment : "");
++    }
++    if(!url)
++      return CURLUE_OUT_OF_MEMORY;
++    *part = url;
++    return CURLUE_OK;
++    break;
++  }
++  default:
++    ptr = NULL;
++  }
++  if(ptr) {
++    *part = strdup(ptr);
++    if(!*part)
++      return CURLUE_OUT_OF_MEMORY;
++    if(plusdecode) {
++      /* convert + to space */
++      char *plus;
++      for(plus = *part; *plus; ++plus) {
++        if(*plus == '+')
++          *plus = ' ';
++      }
++    }
++    if(urldecode) {
++      char *decoded;
++      size_t dlen;
++      CURLcode res = Curl_urldecode(NULL, *part, 0, &decoded, &dlen, TRUE);
++      free(*part);
++      if(res) {
++        *part = NULL;
++        return CURLUE_URLDECODE;
++      }
++      *part = decoded;
++    }
++    return CURLUE_OK;
++  }
++  else
++    return ifmissing;
++}
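++
++/* Getter sketch, matching the test cases in tests/libtest/lib1560.c (the URL
++ * below is illustrative):
++ *
++ *   CURLU *h = curl_url();
++ *   char *p = NULL;
++ *   curl_url_set(h, CURLUPART_URL, "https://example.com/", 0);
++ *   // no port in the URL, but CURLU_DEFAULT_PORT fills in the scheme default
++ *   if(!curl_url_get(h, CURLUPART_PORT, &p, CURLU_DEFAULT_PORT))
++ *     printf("port: %s\n", p);   // expected "443"
++ *   curl_free(p);
++ *   // without the flag, the same call returns CURLUE_NO_PORT instead
++ *   curl_url_cleanup(h);
++ */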
++
++CURLUcode curl_url_set(CURLU *u, CURLUPart what,
++                       const char *part, unsigned int flags)
++{
++  char **storep = NULL;
++  long port = 0;
++  bool urlencode = (flags & CURLU_URLENCODE)? 1 : 0;
++  bool plusencode = FALSE;
++  bool urlskipslash = FALSE;
++  bool appendquery = FALSE;
++
++  if(!u)
++    return CURLUE_BAD_HANDLE;
++  if(!part) {
++    /* setting a part to NULL clears it */
++    switch(what) {
++    case CURLUPART_URL:
++      break;
++    case CURLUPART_SCHEME:
++      storep = &u->scheme;
++      break;
++    case CURLUPART_USER:
++      storep = &u->user;
++      break;
++    case CURLUPART_PASSWORD:
++      storep = &u->password;
++      break;
++    case CURLUPART_OPTIONS:
++      storep = &u->options;
++      break;
++    case CURLUPART_HOST:
++      storep = &u->host;
++      break;
++    case CURLUPART_PORT:
++      storep = &u->port;
++      break;
++    case CURLUPART_PATH:
++      storep = &u->path;
++      break;
++    case CURLUPART_QUERY:
++      storep = &u->query;
++      break;
++    case CURLUPART_FRAGMENT:
++      storep = &u->fragment;
++      break;
++    default:
++      return CURLUE_UNKNOWN_PART;
++    }
++    if(storep && *storep) {
++      free(*storep);
++      *storep = NULL;
++    }
++    return CURLUE_OK;
++  }
++
++  switch(what) {
++  case CURLUPART_SCHEME:
++    if(!(flags & CURLU_NON_SUPPORT_SCHEME) &&
++       /* verify that it is a fine scheme */
++       !Curl_builtin_scheme(part))
++      return CURLUE_UNSUPPORTED_SCHEME;
++    storep = &u->scheme;
++    urlencode = FALSE; /* never */
++    break;
++  case CURLUPART_USER:
++    storep = &u->user;
++    break;
++  case CURLUPART_PASSWORD:
++    storep = &u->password;
++    break;
++  case CURLUPART_OPTIONS:
++    storep = &u->options;
++    break;
++  case CURLUPART_HOST:
++    storep = &u->host;
++    break;
++  case CURLUPART_PORT:
++    urlencode = FALSE; /* never */
++    port = strtol(part, NULL, 10);  /* Port number must be decimal */
++    if((port <= 0) || (port > 0xffff))
++      return CURLUE_BAD_PORT_NUMBER;
++    storep = &u->port;
++    break;
++  case CURLUPART_PATH:
++    urlskipslash = TRUE;
++    storep = &u->path;
++    break;
++  case CURLUPART_QUERY:
++    plusencode = urlencode;
++    appendquery = (flags & CURLU_APPENDQUERY)?1:0;
++    storep = &u->query;
++    break;
++  case CURLUPART_FRAGMENT:
++    storep = &u->fragment;
++    break;
++  case CURLUPART_URL: {
++    /*
++     * Allow a new URL to replace the existing (if any) contents.
++     *
++     * If the existing contents make up a complete URL, a relative URL is
++     * resolved against it and the result replaces it.
++     */
++    CURLUcode result;
++    char *oldurl;
++    char *redired_url;
++    CURLU *handle2;
++
++    if(Curl_is_absolute_url(part, NULL, MAX_SCHEME_LEN)) {
++      handle2 = curl_url();
++      if(!handle2)
++        return CURLUE_OUT_OF_MEMORY;
++      result = parseurl(part, handle2, flags);
++      if(!result)
++        mv_urlhandle(handle2, u);
++      else
++        curl_url_cleanup(handle2);
++      return result;
++    }
++    /* extract the full "old" URL to do the redirect on */
++    result = curl_url_get(u, CURLUPART_URL, &oldurl, flags);
++    if(result) {
++      /* couldn't get the old URL, just use the new! */
++      handle2 = curl_url();
++      if(!handle2)
++        return CURLUE_OUT_OF_MEMORY;
++      result = parseurl(part, handle2, flags);
++      if(!result)
++        mv_urlhandle(handle2, u);
++      else
++        curl_url_cleanup(handle2);
++      return result;
++    }
++
++    /* apply the relative part to create a new URL */
++    redired_url = Curl_concat_url(oldurl, part);
++    free(oldurl);
++    if(!redired_url)
++      return CURLUE_OUT_OF_MEMORY;
++
++    /* now parse the new URL */
++    handle2 = curl_url();
++    if(!handle2) {
++      free(redired_url);
++      return CURLUE_OUT_OF_MEMORY;
++    }
++    result = parseurl(redired_url, handle2, flags);
++    free(redired_url);
++    if(!result)
++      mv_urlhandle(handle2, u);
++    else
++      curl_url_cleanup(handle2);
++    return result;
++  }
++  default:
++    return CURLUE_UNKNOWN_PART;
++  }
++  if(storep) {
++    const char *newp = part;
++    size_t nalloc = strlen(part);
++
++    if(urlencode) {
++      const char *i;
++      char *o;
++      bool free_part = FALSE;
++      char *enc = malloc(nalloc * 3 + 1); /* for worst case! */
++      if(!enc)
++        return CURLUE_OUT_OF_MEMORY;
++      if(plusencode) {
++        /* space to plus */
++        i = part;
++        for(o = enc; *i; ++o, ++i)
++          *o = (*i == ' ') ? '+' : *i;
++        *o = 0; /* zero terminate */
++        part = strdup(enc);
++        if(!part) {
++          free(enc);
++          return CURLUE_OUT_OF_MEMORY;
++        }
++        free_part = TRUE;
++      }
++      for(i = part, o = enc; *i; i++) {
++        if(Curl_isunreserved(*i) ||
++           ((*i == '/') && urlskipslash) ||
++           ((*i == '=') && appendquery) ||
++           ((*i == '+') && plusencode)) {
++          *o = *i;
++          o++;
++        }
++        else {
++          snprintf(o, 4, "%%%02x", *i);
++          o += 3;
++        }
++      }
++      *o = 0; /* zero terminate */
++      newp = enc;
++      if(free_part)
++        free((char *)part);
++    }
++    else {
++      char *p;
++      newp = strdup(part);
++      if(!newp)
++        return CURLUE_OUT_OF_MEMORY;
++      p = (char *)newp;
++      while(*p) {
++        /* make sure percent-encoded letters are lower case */
++        if((*p == '%') && ISXDIGIT(p[1]) && ISXDIGIT(p[2]) &&
++           (ISUPPER(p[1]) || ISUPPER(p[2]))) {
++          p[1] = (char)TOLOWER(p[1]);
++          p[2] = (char)TOLOWER(p[2]);
++          p += 3;
++        }
++        else
++          p++;
++      }
++    }
++
++    if(appendquery) {
++      /* Append the string onto the old query. Add a '&' separator if none is
++         present at the end of the existing query already */
++      size_t querylen = u->query ? strlen(u->query) : 0;
++      bool addamperand = querylen && (u->query[querylen -1] != '&');
++      if(querylen) {
++        size_t newplen = strlen(newp);
++        char *p = malloc(querylen + addamperand + newplen + 1);
++        if(!p) {
++          free((char *)newp);
++          return CURLUE_OUT_OF_MEMORY;
++        }
++        strcpy(p, u->query); /* original query */
++        if(addamperand)
++          p[querylen] = '&'; /* ampersand */
++        strcpy(&p[querylen + addamperand], newp); /* new suffix */
++        free((char *)newp);
++        free(*storep);
++        *storep = p;
++        return CURLUE_OK;
++      }
++    }
++
++    free(*storep);
++    *storep = (char *)newp;
++  }
++  /* set the port number last, so that it is not assigned if an allocation
++     above failed */
++  if(port)
++    u->portnum = port;
++  return CURLUE_OK;
++}
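++
++/* Setter sketch (illustrative): a relative reference passed as CURLUPART_URL
++ * is resolved against the URL already in the handle, and CURLU_APPENDQUERY
++ * glues a new query onto any existing one with a '&':
++ *
++ *   CURLU *h = curl_url();
++ *   char *full = NULL;
++ *   curl_url_set(h, CURLUPART_URL, "http://example.com/dir/index.html", 0);
++ *   curl_url_set(h, CURLUPART_URL, "other.html", 0);  // relative "redirect"
++ *   curl_url_set(h, CURLUPART_QUERY, "a=1", 0);
++ *   curl_url_set(h, CURLUPART_QUERY, "b=2", CURLU_APPENDQUERY);
++ *   if(!curl_url_get(h, CURLUPART_URL, &full, 0))
++ *     printf("%s\n", full);
++ *   // expected: "http://example.com/dir/other.html?a=1&b=2"
++ *   curl_free(full);
++ *   curl_url_cleanup(h);
++ */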
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index aa5fff0..0f6ac44 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -178,6 +178,8 @@ test1533 test1534 test1535 test1536 test1537 test1538 \
+ test1540 \
+ test1550 test1551 test1552 test1553 test1554 test1555 test1556 test1557 \
+ \
++test1560 \
++\
+ test1590 \
+ test1600 test1601 test1602 test1603 test1604 test1605 test1606 test1607 \
+ test1608 test1609 \
+diff --git a/tests/data/test1560 b/tests/data/test1560
+new file mode 100644
+index 0000000..720df03
+--- /dev/null
++++ b/tests/data/test1560
+@@ -0,0 +1,28 @@
++<testcase>
++<info>
++<keywords>
++unittest
++URL API
++</keywords>
++</info>
++
++#
++# Client-side
++<client>
++<server>
++none
++</server>
++<features>
++file
++https
++http
++</features>
++ <name>
++URL API
++ </name>
++<tool>
++lib1560
++</tool>
++</client>
++
++</testcase>
+diff --git a/tests/libtest/Makefile.am b/tests/libtest/Makefile.am
+index d14f37d..dc97e32 100644
+--- a/tests/libtest/Makefile.am
++++ b/tests/libtest/Makefile.am
+@@ -133,3 +133,8 @@ lib1521.c: $(top_srcdir)/tests/libtest/mk-lib1521.pl $(top_srcdir)/include/curl/
+ 
+ checksrc:
+ 	@PERL@ $(top_srcdir)/lib/checksrc.pl $(srcdir)/*.c
++
++if CURLDEBUG
++# for debug builds, we scan the sources on all regular make invokes
++all-local: checksrc
++endif
+diff --git a/tests/libtest/Makefile.inc b/tests/libtest/Makefile.inc
+index 238ef97..7a3cd16 100644
+--- a/tests/libtest/Makefile.inc
++++ b/tests/libtest/Makefile.inc
+@@ -30,6 +30,7 @@ noinst_PROGRAMS = chkhostname libauthretry libntlmconnect                \
+  lib1534 lib1535 lib1536 lib1537 lib1538 \
+  lib1540 \
+  lib1550 lib1551 lib1552 lib1553 lib1554 lib1555 lib1556 lib1557 \
++ lib1560 \
+  lib1900 \
+  lib2033
+ 
+@@ -507,6 +508,9 @@ lib1557_SOURCES = lib1557.c $(SUPPORTFILES) $(TESTUTIL) $(WARNLESS)
+ lib1557_LDADD = $(TESTUTIL_LIBS)
+ lib1557_CPPFLAGS = $(AM_CPPFLAGS) -DLIB1557
+ 
++lib1560_SOURCES = lib1560.c $(SUPPORTFILES) $(TESTUTIL) $(WARNLESS)
++lib1560_LDADD = $(TESTUTIL_LIBS)
++
+ lib1900_SOURCES = lib1900.c $(SUPPORTFILES) $(TESTUTIL) $(WARNLESS)
+ lib1900_LDADD = $(TESTUTIL_LIBS)
+ lib1900_CPPFLAGS = $(AM_CPPFLAGS)
+diff --git a/tests/libtest/lib1560.c b/tests/libtest/lib1560.c
+new file mode 100644
+index 0000000..669ea9a
+--- /dev/null
++++ b/tests/libtest/lib1560.c
+@@ -0,0 +1,760 @@
++/***************************************************************************
++ *                                  _   _ ____  _
++ *  Project                     ___| | | |  _ \| |
++ *                             / __| | | | |_) | |
++ *                            | (__| |_| |  _ <| |___
++ *                             \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.haxx.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ ***************************************************************************/
++
++/*
++ * Note:
++ *
++ * Since the URL parser by default only accepts schemes that *this instance*
++ * of libcurl supports, make sure that the test1560 file lists all the schemes
++ * that this test will assume to be present!
++ */
++
++#include "test.h"
++
++#include "testutil.h"
++#include "warnless.h"
++#include "memdebug.h" /* LAST include file */
++
++struct part {
++  CURLUPart part;
++  const char *name;
++};
++
++
++static int checkparts(CURLU *u, const char *in, const char *wanted,
++                      unsigned int getflags)
++{
++  int i;
++  CURLUcode rc;
++  char buf[256];
++  char *bufp = &buf[0];
++  size_t len = sizeof(buf);
++  struct part parts[] = {
++    {CURLUPART_SCHEME, "scheme"},
++    {CURLUPART_USER, "user"},
++    {CURLUPART_PASSWORD, "password"},
++    {CURLUPART_OPTIONS, "options"},
++    {CURLUPART_HOST, "host"},
++    {CURLUPART_PORT, "port"},
++    {CURLUPART_PATH, "path"},
++    {CURLUPART_QUERY, "query"},
++    {CURLUPART_FRAGMENT, "fragment"},
++    {0, NULL}
++  };
++  buf[0] = 0;
++
++  for(i = 0; parts[i].name; i++) {
++    char *p = NULL;
++    size_t n;
++    rc = curl_url_get(u, parts[i].part, &p, getflags);
++    if(!rc && p) {
++      snprintf(bufp, len, "%s%s", buf[0]?" | ":"", p);
++    }
++    else
++      snprintf(bufp, len, "%s[%d]", buf[0]?" | ":"", (int)rc);
++
++    n = strlen(bufp);
++    bufp += n;
++    len -= n;
++    curl_free(p);
++  }
++  if(strcmp(buf, wanted)) {
++    fprintf(stderr, "in: %s\nwanted: %s\ngot:    %s\n", in, wanted, buf);
++    return 1;
++  }
++  return 0;
++}
++
++struct redircase {
++  const char *in;
++  const char *set;
++  const char *out;
++  unsigned int urlflags;
++  unsigned int setflags;
++  CURLUcode ucode;
++};
++
++struct setcase {
++  const char *in;
++  const char *set;
++  const char *out;
++  unsigned int urlflags;
++  unsigned int setflags;
++  CURLUcode ucode;
++};
++
++struct testcase {
++  const char *in;
++  const char *out;
++  unsigned int urlflags;
++  unsigned int getflags;
++  CURLUcode ucode;
++};
++
++struct urltestcase {
++  const char *in;
++  const char *out;
++  unsigned int urlflags; /* pass to curl_url() */
++  unsigned int getflags; /* pass to curl_url_get() */
++  CURLUcode ucode;
++};
++
++struct querycase {
++  const char *in;
++  const char *q;
++  const char *out;
++  unsigned int urlflags; /* pass to curl_url() */
++  unsigned int qflags; /* pass to curl_url_get() */
++  CURLUcode ucode;
++};
++
++static struct testcase get_parts_list[] = {
++  {"https://127.0.0.1:443",
++   "https | [11] | [12] | [13] | 127.0.0.1 | [15] | / | [17] | [18]",
++   0, CURLU_NO_DEFAULT_PORT, CURLUE_OK},
++  {"http://%3a:%3a@ex%0ample/%3f+?+%3f+%23#+%23%3f%g7",
++   "http | : | : | [13] | [6] | [15] | /?+ |  ? # | +#?%g7",
++   0, CURLU_URLDECODE, CURLUE_OK},
++  {"http://%3a:%3a@ex%0ample/%3f?%3f%35#%35%3f%g7",
++   "http | %3a | %3a | [13] | ex%0ample | [15] | /%3f | %3f%35 | %35%3f%g7",
++   0, 0, CURLUE_OK},
++  {"http://HO0_-st%41/",
++   "http | [11] | [12] | [13] | HO0_-st%41 | [15] | / | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"file://hello.html",
++   "",
++   0, 0, CURLUE_MALFORMED_INPUT},
++  {"http://HO0_-st/",
++   "http | [11] | [12] | [13] | HO0_-st | [15] | / | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"imap://user:pass;option@server/path",
++   "imap | user | pass | option | server | [15] | /path | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"http://user:pass;option@server/path",
++   "http | user | pass;option | [13] | server | [15] | /path | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"file:/hello.html",
++   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"file://127.0.0.1/hello.html",
++   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"file:////hello.html",
++   "file | [11] | [12] | [13] | [14] | [15] | //hello.html | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"file:///hello.html",
++   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [17] | [18]",
++   0, 0, CURLUE_OK},
++  {"https://127.0.0.1",
++   "https | [11] | [12] | [13] | 127.0.0.1 | 443 | / | [17] | [18]",
++   0, CURLU_DEFAULT_PORT, CURLUE_OK},
++  {"https://127.0.0.1",
++   "https | [11] | [12] | [13] | 127.0.0.1 | [15] | / | [17] | [18]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"https://[::1]:1234",
++   "https | [11] | [12] | [13] | [::1] | 1234 | / | [17] | [18]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"https://127abc.com",
++   "https | [11] | [12] | [13] | 127abc.com | [15] | / | [17] | [18]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"https:// example.com?check",
++   "",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_MALFORMED_INPUT},
++  {"https://e x a m p l e.com?check",
++   "",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_MALFORMED_INPUT},
++  {"https://example.com?check",
++   "https | [11] | [12] | [13] | example.com | [15] | / | check | [18]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"https://example.com:65536",
++   "",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_BAD_PORT_NUMBER},
++  {"https://example.com:0#moo",
++   "",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_BAD_PORT_NUMBER},
++  {"https://example.com:01#moo",
++   "https | [11] | [12] | [13] | example.com | 1 | / | "
++   "[17] | moo",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"https://example.com:1#moo",
++   "https | [11] | [12] | [13] | example.com | 1 | / | "
++   "[17] | moo",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://example.com#moo",
++   "http | [11] | [12] | [13] | example.com | [15] | / | "
++   "[17] | moo",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://example.com",
++   "http | [11] | [12] | [13] | example.com | [15] | / | "
++   "[17] | [18]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://example.com/path/html",
++   "http | [11] | [12] | [13] | example.com | [15] | /path/html | "
++   "[17] | [18]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://example.com/path/html?query=name",
++   "http | [11] | [12] | [13] | example.com | [15] | /path/html | "
++   "query=name | [18]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://example.com/path/html?query=name#anchor",
++   "http | [11] | [12] | [13] | example.com | [15] | /path/html | "
++   "query=name | anchor",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://example.com:1234/path/html?query=name#anchor",
++   "http | [11] | [12] | [13] | example.com | 1234 | /path/html | "
++   "query=name | anchor",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http:///user:password@example.com:1234/path/html?query=name#anchor",
++   "http | user | password | [13] | example.com | 1234 | /path/html | "
++   "query=name | anchor",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"https://user:password@example.com:1234/path/html?query=name#anchor",
++   "https | user | password | [13] | example.com | 1234 | /path/html | "
++   "query=name | anchor",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://user:password@example.com:1234/path/html?query=name#anchor",
++   "http | user | password | [13] | example.com | 1234 | /path/html | "
++   "query=name | anchor",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http:/user:password@example.com:1234/path/html?query=name#anchor",
++   "http | user | password | [13] | example.com | 1234 | /path/html | "
++   "query=name | anchor",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http:////user:password@example.com:1234/path/html?query=name#anchor",
++   "",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_MALFORMED_INPUT},
++  {NULL, NULL, 0, 0, CURLUE_OK},
++};
++
++static struct urltestcase get_url_list[] = {
++  {"HTTP://test/", "http://test/", 0, 0, CURLUE_OK},
++  {"http://HO0_-st..~./", "", 0, 0, CURLUE_MALFORMED_INPUT},
++  {"http:/@example.com: 123/", "", 0, 0, CURLUE_BAD_PORT_NUMBER},
++  {"http:/@example.com:123 /", "", 0, 0, CURLUE_BAD_PORT_NUMBER},
++  {"http:/@example.com:123a/", "", 0, 0, CURLUE_BAD_PORT_NUMBER},
++  {"http://host/file\r", "", 0, 0, CURLUE_MALFORMED_INPUT},
++  {"http://host/file\n\x03", "", 0, 0, CURLUE_MALFORMED_INPUT},
++  {"htt\x02://host/file", "",
++   CURLU_NON_SUPPORT_SCHEME, 0, CURLUE_MALFORMED_INPUT},
++  {" http://host/file", "", 0, 0, CURLUE_MALFORMED_INPUT},
++  /* here the password ends at the semicolon and options is 'word' */
++  {"imap://user:pass;word@host/file",
++   "imap://user:pass;word@host/file",
++   0, 0, CURLUE_OK},
++  /* here the password has the semicolon */
++  {"http://user:pass;word@host/file",
++   "http://user:pass;word@host/file",
++   0, 0, CURLUE_OK},
++  {"file:///file.txt#moo",
++   "file:///file.txt#moo",
++   0, 0, CURLUE_OK},
++  {"file:////file.txt",
++   "file:////file.txt",
++   0, 0, CURLUE_OK},
++  {"file:///file.txt",
++   "file:///file.txt",
++   0, 0, CURLUE_OK},
++  {"http://example.com/hello/../here",
++   "http://example.com/hello/../here",
++   CURLU_PATH_AS_IS, 0, CURLUE_OK},
++  {"http://example.com/hello/../here",
++   "http://example.com/here",
++   0, 0, CURLUE_OK},
++  {"http://example.com:80",
++   "http://example.com/",
++   0, CURLU_NO_DEFAULT_PORT, CURLUE_OK},
++  {"tp://example.com/path/html",
++   "",
++   0, 0, CURLUE_UNSUPPORTED_SCHEME},
++  {"http://hello:fool@example.com",
++   "",
++   CURLU_DISALLOW_USER, 0, CURLUE_USER_NOT_ALLOWED},
++  {"http:/@example.com:123",
++   "http://example.com:123/",
++   0, 0, CURLUE_OK},
++  {"http:/:password@example.com",
++   "http://:password@example.com/",
++   0, 0, CURLUE_OK},
++  {"http://user@example.com?#",
++   "http://user@example.com/",
++   0, 0, CURLUE_OK},
++  {"http://user@example.com?",
++   "http://user@example.com/",
++   0, 0, CURLUE_OK},
++  {"http://user@example.com#anchor",
++   "http://user@example.com/#anchor",
++   0, 0, CURLUE_OK},
++  {"example.com/path/html",
++   "https://example.com/path/html",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"example.com/path/html",
++   "",
++   0, 0, CURLUE_MALFORMED_INPUT},
++  {"http://user:password@example.com:1234/path/html?query=name#anchor",
++   "http://user:password@example.com:1234/path/html?query=name#anchor",
++   0, 0, CURLUE_OK},
++  {"http://example.com:1234/path/html?query=name#anchor",
++   "http://example.com:1234/path/html?query=name#anchor",
++   0, 0, CURLUE_OK},
++  {"http://example.com/path/html?query=name#anchor",
++   "http://example.com/path/html?query=name#anchor",
++   0, 0, CURLUE_OK},
++  {"http://example.com/path/html?query=name",
++   "http://example.com/path/html?query=name",
++   0, 0, CURLUE_OK},
++  {"http://example.com/path/html",
++   "http://example.com/path/html",
++   0, 0, CURLUE_OK},
++  {"tp://example.com/path/html",
++   "tp://example.com/path/html",
++   CURLU_NON_SUPPORT_SCHEME, 0, CURLUE_OK},
++  {NULL, NULL, 0, 0, 0}
++};
++
++static int checkurl(const char *url, const char *out)
++{
++  if(strcmp(out, url)) {
++    fprintf(stderr, "Wanted: %s\nGot   : %s\n",
++            out, url);
++    return 1;
++  }
++  return 0;
++}
++
++/* !checksrc! disable SPACEBEFORECOMMA 1 */
++static struct setcase set_parts_list[] = {
++  {"https://host/",
++   "path=%4A%4B%4C,",
++   "https://host/%4a%4b%4c",
++   0, 0, CURLUE_NO_HOST},
++  {"https://host/mooo?q#f",
++   "path=NULL,query=NULL,fragment=NULL,",
++   "https://host/",
++   0, 0, CURLUE_NO_HOST},
++  {"https://user:secret@host/",
++   "user=NULL,password=NULL,",
++   "https://host/",
++   0, 0, CURLUE_NO_HOST},
++  {NULL,
++   "scheme=https,user=   @:,host=foobar,",
++   "https://%20%20%20%40%3a@foobar/",
++   0, CURLU_URLENCODE, CURLUE_OK},
++  {NULL,
++   "scheme=https,host=  ,path= ,user= ,password= ,query= ,fragment= ,",
++   "https://%20:%20@%20%20/%20?+#%20",
++   0, CURLU_URLENCODE, CURLUE_OK},
++  {NULL,
++   "scheme=https,host=foobar,path=/this /path /is /here,",
++   "https://foobar/this%20/path%20/is%20/here",
++   0, CURLU_URLENCODE, CURLUE_OK},
++  {"imap://user:secret;opt@host/",
++   "options=updated,scheme=imaps,password=p4ssw0rd,",
++   "imaps://user:p4ssw0rd;updated@host/",
++   0, 0, CURLUE_NO_HOST},
++  {"imap://user:secret;optit@host/",
++   "scheme=https,",
++   "https://user:secret@host/",
++   0, 0, CURLUE_NO_HOST},
++  {"file:///file#anchor",
++   "scheme=https,host=example,",
++   "https://example/file#anchor",
++   0, 0, CURLUE_NO_HOST},
++  {NULL, /* start fresh! */
++   "scheme=file,host=127.0.0.1,path=/no,user=anonymous,",
++   "file:///no",
++   0, 0, CURLUE_OK},
++  {NULL, /* start fresh! */
++   "scheme=ftp,host=127.0.0.1,path=/no,user=anonymous,",
++   "ftp://anonymous@127.0.0.1/no",
++   0, 0, CURLUE_OK},
++  {NULL, /* start fresh! */
++   "scheme=https,host=example.com,",
++   "https://example.com/",
++   0, CURLU_NON_SUPPORT_SCHEME, CURLUE_OK},
++  {"http://user:foo@example.com/path?query#frag",
++   "fragment=changed,",
++   "http://user:foo@example.com/path?query#changed",
++   0, CURLU_NON_SUPPORT_SCHEME, CURLUE_OK},
++  {"http://example.com/",
++   "scheme=foo,", /* not accepted */
++   "http://example.com/",
++   0, 0, CURLUE_OK},
++  {"http://example.com/",
++   "scheme=https,path=/hello,fragment=snippet,",
++   "https://example.com/hello#snippet",
++   0, 0, CURLUE_OK},
++  {"http://example.com:80",
++   "user=foo,port=1922,",
++   "http://foo@example.com:1922/",
++   0, 0, CURLUE_OK},
++  {"http://example.com:80",
++   "user=foo,password=bar,",
++   "http://foo:bar@example.com:80/",
++   0, 0, CURLUE_OK},
++  {"http://example.com:80",
++   "user=foo,",
++   "http://foo@example.com:80/",
++   0, 0, CURLUE_OK},
++  {"http://example.com",
++   "host=www.example.com,",
++   "http://www.example.com/",
++   0, 0, CURLUE_OK},
++  {"http://example.com:80",
++   "scheme=ftp,",
++   "ftp://example.com:80/",
++   0, 0, CURLUE_OK},
++  {NULL, NULL, NULL, 0, 0, 0}
++};
++
++static CURLUPart part2id(char *part)
++{
++  if(!strcmp("url", part))
++    return CURLUPART_URL;
++  if(!strcmp("scheme", part))
++    return CURLUPART_SCHEME;
++  if(!strcmp("user", part))
++    return CURLUPART_USER;
++  if(!strcmp("password", part))
++    return CURLUPART_PASSWORD;
++  if(!strcmp("options", part))
++    return CURLUPART_OPTIONS;
++  if(!strcmp("host", part))
++    return CURLUPART_HOST;
++  if(!strcmp("port", part))
++    return CURLUPART_PORT;
++  if(!strcmp("path", part))
++    return CURLUPART_PATH;
++  if(!strcmp("query", part))
++    return CURLUPART_QUERY;
++  if(!strcmp("fragment", part))
++    return CURLUPART_FRAGMENT;
++  return 9999; /* bad input => bad output */
++}
++
++static void updateurl(CURLU *u, const char *cmd, unsigned int setflags)
++{
++  const char *p = cmd;
++
++  /* make sure the last command ends with a comma too! */
++  while(p) {
++    char *e = strchr(p, ',');
++    if(e) {
++      size_t n = e-p;
++      char buf[80];
++      char part[80];
++      char value[80];
++      memcpy(buf, p, n);
++      buf[n] = 0;
++      if(2 == sscanf(buf, "%79[^=]=%79[^,]", part, value)) {
++        CURLUPart what = part2id(part);
++#if 0
++        /* for debugging this */
++        fprintf(stderr, "%s = %s [%d]\n", part, value, (int)what);
++#endif
++        if(!strcmp("NULL", value))
++          curl_url_set(u, what, NULL, setflags);
++        else
++          curl_url_set(u, what, value, setflags);
++      }
++      p = e + 1;
++      continue;
++    }
++    break;
++  }
++
++}
++
++static struct redircase set_url_list[] = {
++  {"file://localhost/path?query#frag",
++   "foo#another",
++   "file:///foo#another",
++   0, 0, 0},
++  {"http://example.com/path?query#frag",
++   "https://two.example.com/bradnew",
++   "https://two.example.com/bradnew",
++   0, 0, 0},
++  {"http://example.com/path?query#frag",
++   "../../newpage#foo",
++   "http://example.com/newpage#foo",
++   0, 0, 0},
++  {"http://user:foo@example.com/path?query#frag",
++   "../../newpage",
++   "http://user:foo@example.com/newpage",
++   0, 0, 0},
++  {"http://user:foo@example.com/path?query#frag",
++   "../newpage",
++   "http://user:foo@example.com/newpage",
++   0, 0, 0},
++  {NULL, NULL, NULL, 0, 0, 0}
++};
++
++static int set_url(void)
++{
++  int i;
++  CURLUcode rc;
++  CURLU *urlp;
++  int error = 0;
++
++  for(i = 0; set_url_list[i].in && !error; i++) {
++    char *url = NULL;
++    urlp = curl_url();
++    if(!urlp)
++      break;
++    rc = curl_url_set(urlp, CURLUPART_URL, set_url_list[i].in,
++                      set_url_list[i].urlflags);
++    if(!rc) {
++      rc = curl_url_set(urlp, CURLUPART_URL, set_url_list[i].set,
++                        set_url_list[i].setflags);
++      if(rc) {
++        fprintf(stderr, "%s:%d Set URL %s returned %d\n",
++                __FILE__, __LINE__, set_url_list[i].set,
++                (int)rc);
++        error++;
++      }
++      else {
++        rc = curl_url_get(urlp, CURLUPART_URL, &url, 0);
++        if(rc) {
++          fprintf(stderr, "%s:%d Get URL returned %d\n",
++                  __FILE__, __LINE__, (int)rc);
++          error++;
++        }
++        else {
++          if(checkurl(url, set_url_list[i].out)) {
++            error++;
++          }
++        }
++      }
++      curl_free(url);
++    }
++    else if(rc != set_url_list[i].ucode) {
++      fprintf(stderr, "Set URL\nin: %s\nreturned %d (expected %d)\n",
++              set_url_list[i].in, (int)rc, set_url_list[i].ucode);
++      error++;
++    }
++    curl_url_cleanup(urlp);
++  }
++  return error;
++}
++
++static int set_parts(void)
++{
++  int i;
++  CURLUcode rc;
++  int error = 0;
++
++  for(i = 0; set_parts_list[i].set && !error; i++) {
++    char *url = NULL;
++    CURLU *urlp = curl_url();
++    if(!urlp) {
++      error++;
++      break;
++    }
++    if(set_parts_list[i].in)
++      rc = curl_url_set(urlp, CURLUPART_URL, set_parts_list[i].in,
++                        set_parts_list[i].urlflags);
++    else
++      rc = CURLUE_OK;
++    if(!rc) {
++      updateurl(urlp, set_parts_list[i].set, set_parts_list[i].setflags);
++      rc = curl_url_get(urlp, CURLUPART_URL, &url, 0);
++
++      if(rc) {
++        fprintf(stderr, "%s:%d Get URL returned %d\n",
++                __FILE__, __LINE__, (int)rc);
++        error++;
++      }
++      else if(checkurl(url, set_parts_list[i].out)) {
++        error++;
++      }
++    }
++    else if(rc != set_parts_list[i].ucode) {
++      fprintf(stderr, "Set parts\nin: %s\nreturned %d (expected %d)\n",
++              set_parts_list[i].in, (int)rc, set_parts_list[i].ucode);
++      error++;
++    }
++    curl_free(url);
++    curl_url_cleanup(urlp);
++  }
++  return error;
++}
++
++static int get_url(void)
++{
++  int i;
++  CURLUcode rc;
++  int error = 0;
++  for(i = 0; get_url_list[i].in && !error; i++) {
++    char *url = NULL;
++    CURLU *urlp = curl_url();
++    if(!urlp) {
++      error++;
++      break;
++    }
++    rc = curl_url_set(urlp, CURLUPART_URL, get_url_list[i].in,
++                      get_url_list[i].urlflags);
++    if(!rc) {
++      rc = curl_url_get(urlp, CURLUPART_URL, &url, get_url_list[i].getflags);
++
++      if(rc) {
++        fprintf(stderr, "%s:%d returned %d\n",
++                __FILE__, __LINE__, (int)rc);
++        error++;
++      }
++      else {
++        if(checkurl(url, get_url_list[i].out)) {
++          error++;
++        }
++      }
++    }
++    else if(rc != get_url_list[i].ucode) {
++      fprintf(stderr, "Get URL\nin: %s\nreturned %d (expected %d)\n",
++              get_url_list[i].in, (int)rc, get_url_list[i].ucode);
++      error++;
++    }
++    curl_free(url);
++    curl_url_cleanup(urlp);
++  }
++  return error;
++}
++
++static int get_parts(void)
++{
++  int i;
++  CURLUcode rc;
++  CURLU *urlp;
++  int error = 0;
++  for(i = 0; get_parts_list[i].in && !error; i++) {
++    urlp = curl_url();
++    if(!urlp) {
++      error++;
++      break;
++    }
++    rc = curl_url_set(urlp, CURLUPART_URL,
++                      get_parts_list[i].in,
++                      get_parts_list[i].urlflags);
++    if(rc != get_parts_list[i].ucode) {
++      fprintf(stderr, "Get parts\nin: %s\nreturned %d (expected %d)\n",
++              get_parts_list[i].in, (int)rc, get_parts_list[i].ucode);
++      error++;
++    }
++    else if(get_parts_list[i].ucode) {
++      /* the expected error happened */
++    }
++    else if(checkparts(urlp, get_parts_list[i].in, get_parts_list[i].out,
++                       get_parts_list[i].getflags))
++      error++;
++    curl_url_cleanup(urlp);
++  }
++  return error;
++}
++
++static struct querycase append_list[] = {
++  {"HTTP://test/?s", "name=joe\x02", "http://test/?s&name=joe%02",
++   0, CURLU_URLENCODE, CURLUE_OK},
++  {"HTTP://test/?size=2#f", "name=joe=", "http://test/?size=2&name=joe=#f",
++   0, CURLU_URLENCODE, CURLUE_OK},
++  {"HTTP://test/?size=2#f", "name=joe doe",
++   "http://test/?size=2&name=joe+doe#f",
++   0, CURLU_URLENCODE, CURLUE_OK},
++  {"HTTP://test/", "name=joe", "http://test/?name=joe", 0, 0, CURLUE_OK},
++  {"HTTP://test/?size=2", "name=joe", "http://test/?size=2&name=joe",
++   0, 0, CURLUE_OK},
++  {"HTTP://test/?size=2&", "name=joe", "http://test/?size=2&name=joe",
++   0, 0, CURLUE_OK},
++  {"HTTP://test/?size=2#f", "name=joe", "http://test/?size=2&name=joe#f",
++   0, 0, CURLUE_OK},
++  {NULL, NULL, NULL, 0, 0, 0}
++};
++
++static int append(void)
++{
++  int i;
++  CURLUcode rc;
++  CURLU *urlp;
++  int error = 0;
++  for(i = 0; append_list[i].in && !error; i++) {
++    urlp = curl_url();
++    if(!urlp) {
++      error++;
++      break;
++    }
++    rc = curl_url_set(urlp, CURLUPART_URL,
++                      append_list[i].in,
++                      append_list[i].urlflags);
++    if(rc)
++      error++;
++    else
++      rc = curl_url_set(urlp, CURLUPART_QUERY,
++                        append_list[i].q,
++                        append_list[i].qflags | CURLU_APPENDQUERY);
++    if(error)
++      ;
++    else if(rc != append_list[i].ucode) {
++      fprintf(stderr, "Append\nin: %s\nreturned %d (expected %d)\n",
++              append_list[i].in, (int)rc, append_list[i].ucode);
++      error++;
++    }
++    else if(append_list[i].ucode) {
++      /* the expected error happened */
++    }
++    else {
++      char *url;
++      rc = curl_url_get(urlp, CURLUPART_URL, &url, 0);
++      if(rc) {
++        fprintf(stderr, "%s:%d Get URL returned %d\n",
++                __FILE__, __LINE__, (int)rc);
++        error++;
++      }
++      else {
++        if(checkurl(url, append_list[i].out)) {
++          error++;
++        }
++        curl_free(url);
++      }
++    }
++    curl_url_cleanup(urlp);
++  }
++  return error;
++}
++
++int test(char *URL)
++{
++  (void)URL; /* not used */
++
++  if(append())
++    return 5;
++
++  if(set_url())
++    return 1;
++
++  if(set_parts())
++    return 2;
++
++  if(get_url())
++    return 3;
++
++  if(get_parts())
++    return 4;
++
++  printf("success\n");
++  return 0;
++}
+-- 
+2.17.2
+
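+A minimal sketch of the round trip the lib1560 harness above drives through
+the public API; the input URL is illustrative and <curl/curl.h> is assumed to
+be included:
+
+  CURLU *u = curl_url();
+  char *out = NULL;
+  CURLUcode rc = curl_url_set(u, CURLUPART_URL, "example.com/path",
+                              CURLU_DEFAULT_SCHEME);
+  if(!rc && !curl_url_get(u, CURLUPART_URL, &out, 0)) {
+    /* out is "https://example.com/path" - the default scheme was applied */
+    curl_free(out);
+  }
+  curl_url_cleanup(u);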
+
+From 581a3b902b949f090776c5295a8aa0786edba773 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sat, 8 Sep 2018 16:02:25 +0200
+Subject: [PATCH 02/14] curl_url-docs: fix AVAILABILITY as Added in curl 7.62.0
+
+Upstream-commit: 890eea5aade0fc4ee167e83948d53351c11dd1ae
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/curl_url.3         | 2 +-
+ docs/libcurl/curl_url_cleanup.3 | 2 +-
+ docs/libcurl/curl_url_dup.3     | 2 +-
+ docs/libcurl/curl_url_get.3     | 2 +-
+ docs/libcurl/curl_url_set.3     | 2 +-
+ 5 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/docs/libcurl/curl_url.3 b/docs/libcurl/curl_url.3
+index 0a56264..a14c45b 100644
+--- a/docs/libcurl/curl_url.3
++++ b/docs/libcurl/curl_url.3
+@@ -55,7 +55,7 @@ Returns a \fBCURLU *\fP if successful, or NULL if out of memory.
+   }
+ .fi
+ .SH AVAILABILITY
+-Added in curl 7.63.0
++Added in curl 7.62.0
+ .SH "SEE ALSO"
+ .BR curl_url_cleanup "(3), " curl_url_get "(3), " curl_url_set "(3), "
+ .BR curl_url_dup "(3), "
+diff --git a/docs/libcurl/curl_url_cleanup.3 b/docs/libcurl/curl_url_cleanup.3
+index a8158b7..4d095a9 100644
+--- a/docs/libcurl/curl_url_cleanup.3
++++ b/docs/libcurl/curl_url_cleanup.3
+@@ -38,7 +38,7 @@ none
+   curl_url_cleanup(url);
+ .fi
+ .SH AVAILABILITY
+-Added in curl 7.63.0
++Added in curl 7.62.0
+ .SH "SEE ALSO"
+ .BR curl_url_dup "(3), " curl_url "(3), " curl_url_set "(3), "
+ .BR curl_url_get "(3), "
+diff --git a/docs/libcurl/curl_url_dup.3 b/docs/libcurl/curl_url_dup.3
+index 4815dbd..c0259e0 100644
+--- a/docs/libcurl/curl_url_dup.3
++++ b/docs/libcurl/curl_url_dup.3
+@@ -46,7 +46,7 @@ Returns a new handle or NULL if out of memory.
+   curl_url_cleanup(url);
+ .fi
+ .SH AVAILABILITY
+-Added in curl 7.63.0
++Added in curl 7.62.0
+ .SH "SEE ALSO"
+ .BR curl_url_cleanup "(3), " curl_url "(3), " curl_url_set "(3), "
+ .BR curl_url_get "(3), "
+diff --git a/docs/libcurl/curl_url_get.3 b/docs/libcurl/curl_url_get.3
+index 824d496..b1313ea 100644
+--- a/docs/libcurl/curl_url_get.3
++++ b/docs/libcurl/curl_url_get.3
+@@ -104,7 +104,7 @@ If this function returns an error, no URL part is returned.
+   }
+ .fi
+ .SH AVAILABILITY
+-Added in curl 7.63.0
++Added in curl 7.62.0
+ .SH "SEE ALSO"
+ .BR curl_url_cleanup "(3), " curl_url "(3), " curl_url_set "(3), "
+ .BR curl_url_dup "(3), "
+diff --git a/docs/libcurl/curl_url_set.3 b/docs/libcurl/curl_url_set.3
+index 75fc0d9..79272e8 100644
+--- a/docs/libcurl/curl_url_set.3
++++ b/docs/libcurl/curl_url_set.3
+@@ -114,7 +114,7 @@ If this function returns an error, no URL part is returned.
+   curl_url_cleanup(url);
+ .fi
+ .SH AVAILABILITY
+-Added in curl 7.63.0
++Added in curl 7.62.0
+ .SH "SEE ALSO"
+ .BR curl_url_cleanup "(3), " curl_url "(3), " curl_url_get "(3), "
+ .BR curl_url_dup "(3), "
+-- 
+2.17.2
+
+
+From 9c33cac88a9d94557ba48df7c290afc950895bc4 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sat, 8 Sep 2018 19:39:57 +0200
+Subject: [PATCH 03/14] curl_url_set.3: correct description
+
+Upstream-commit: 8b85a3cac516a302a8ce3911cf8b9a229b62a59d
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/curl_url_set.3 | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/docs/libcurl/curl_url_set.3 b/docs/libcurl/curl_url_set.3
+index 79272e8..0d6e9aa 100644
+--- a/docs/libcurl/curl_url_set.3
++++ b/docs/libcurl/curl_url_set.3
+@@ -21,7 +21,7 @@
+ .\" **************************************************************************
+ .TH curl_url_set 3 "6 Aug 2018" "libcurl" "libcurl Manual"
+ .SH NAME
+-curl_url_set - set a part from a URL
++curl_url_set - set a URL part
+ .SH SYNOPSIS
+ .B #include <curl/curl.h>
+ 
+-- 
+2.17.2
+
+
+From dc2c1d978ec78a5f278d194e1b258015e8bfd664 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sat, 8 Sep 2018 22:57:36 +0200
+Subject: [PATCH 04/14] urlapi: avoid dereferencing a possible NULL pointer
+
+Coverity CID 1439134
+
+Upstream-commit: 01dedc99fc8d386fe955421ab05a1c4094c9190b
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/urlapi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/lib/urlapi.c b/lib/urlapi.c
+index 8287861..3183598 100644
+--- a/lib/urlapi.c
++++ b/lib/urlapi.c
+@@ -438,10 +438,10 @@ static CURLUcode parse_hostname_login(struct Curl_URL *u,
+   *hostname = ++ptr;
+ 
+   /* We could use the login information in the URL so extract it. Only parse
+-     options if the handler says we should. */
++     options if the handler says we should. Note that 'h' might be NULL! */
+   ccode = Curl_parse_login_details(login, ptr - login - 1,
+                                    &userp, &passwdp,
+-                                   h->flags & PROTOPT_URLOPTIONS ?
++                                   (h && (h->flags & PROTOPT_URLOPTIONS)) ?
+                                    &optionsp:NULL);
+   if(ccode) {
+     result = CURLUE_MALFORMED_INPUT;
+-- 
+2.17.2
+
+
+From 6684d372c20609afd21f21399deda6deedea911e Mon Sep 17 00:00:00 2001
+From: Daniel Gustafsson <daniel@yesql.se>
+Date: Sat, 8 Sep 2018 23:05:21 +0200
+Subject: [PATCH 05/14] url.c: fix comment typo and indentation
+
+Closes #2960
+
+Upstream-commit: 6e4b8c5073c3985cef98656c3b375981d25a8898
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/url.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index 4f75f11..dcc6cc8 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1939,7 +1939,7 @@ static struct connectdata *allocate_conn(struct Curl_easy *data)
+   return NULL;
+ }
+ 
+-/* returns the handdler if the given scheme is built-in */
++/* returns the handler if the given scheme is built-in */
+ const struct Curl_handler *Curl_builtin_scheme(const char *scheme)
+ {
+   const struct Curl_handler * const *pp;
+@@ -2245,7 +2245,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
+        the host-name part */
+     memmove(path + hostlen + 1, path, pathlen + 1);
+ 
+-     /* now copy the trailing host part in front of the existing path */
++    /* now copy the trailing host part in front of the existing path */
+     memcpy(path + 1, query, hostlen);
+ 
+     path[0]='/'; /* prepend the missing slash */
+-- 
+2.17.2
+
+
+From 0f8d6ab26abd00459d1364a69d7771a6b3a58ce3 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 10 Sep 2018 10:09:18 +0200
+Subject: [PATCH 06/14] libcurl-url.3: overview man page for the URL API
+
+Closes #2967
+
+Upstream-commit: 11e8a43f853b9bf050db58f073e6f2411821ce60
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/Makefile.inc  |   1 +
+ docs/libcurl/libcurl-url.3 | 137 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 138 insertions(+)
+ create mode 100644 docs/libcurl/libcurl-url.3
+
+diff --git a/docs/libcurl/Makefile.inc b/docs/libcurl/Makefile.inc
+index 955492c..97cb50c 100644
+--- a/docs/libcurl/Makefile.inc
++++ b/docs/libcurl/Makefile.inc
+@@ -23,4 +23,5 @@ man_MANS = curl_easy_cleanup.3 curl_easy_getinfo.3 curl_easy_init.3      \
+   curl_mime_filename.3 curl_mime_subparts.3                              \
+   curl_mime_type.3 curl_mime_headers.3 curl_mime_encoder.3 libcurl-env.3 \
+   curl_url.3 curl_url_cleanup.3 curl_url_dup.3 curl_url_get.3 curl_url_set.3 \
++  libcurl-url.3 \
+   libcurl-security.3
+diff --git a/docs/libcurl/libcurl-url.3 b/docs/libcurl/libcurl-url.3
+new file mode 100644
+index 0000000..4ad0a15
+--- /dev/null
++++ b/docs/libcurl/libcurl-url.3
+@@ -0,0 +1,137 @@
++.\" **************************************************************************
++.\" *                                  _   _ ____  _
++.\" *  Project                     ___| | | |  _ \| |
++.\" *                             / __| | | | |_) | |
++.\" *                            | (__| |_| |  _ <| |___
++.\" *                             \___|\___/|_| \_\_____|
++.\" *
++.\" * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" *
++.\" * This software is licensed as described in the file COPYING, which
++.\" * you should have received as part of this distribution. The terms
++.\" * are also available at https://curl.haxx.se/docs/copyright.html.
++.\" *
++.\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++.\" * copies of the Software, and permit persons to whom the Software is
++.\" * furnished to do so, under the terms of the COPYING file.
++.\" *
++.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++.\" * KIND, either express or implied.
++.\" *
++.\" **************************************************************************
++.TH libcurl 3 "10 Sep 2018" "libcurl" "libcurl url interface"
++.SH NAME
++libcurl-url \- URL interface overview
++.SH DESCRIPTION
++The URL interface provides a set of functions for parsing and generating URLs.
++.SH INCLUDE
++You still only include <curl/curl.h> in your code. Note that the URL API was
++introduced in 7.62.0.
++.SH CREATE
++Create a handle that holds URL info and resources with \fIcurl_url(3)\fP:
++
++  CURLU *h = curl_url();
++.SH CLEANUP
++When done with it, clean it up with \fIcurl_url_cleanup(3)\fP:
++
++  curl_url_cleanup(h);
++.SH DUPLICATE
++When you need a copy of a handle, just duplicate it with \fIcurl_url_dup(3)\fP:
++
++  CURLU *nh = curl_url_dup(h);
++.SH PARSING
++By "setting" a URL to the handle with \fIcurl_url_set(3)\fP, the URL is parsed
++and stored in the handle. If the URL is not syntactically correct it will
++return an error instead.
++
++.nf
++  rc = curl_url_set(h, CURLUPART_URL,
++                    "https://example.com:449/foo/bar?name=moo", 0);
++.fi
++
++The zero in the fourth argument is a bitmask for changing specific features.
++
++If successful, this stores the URL in its individual parts within the handle.
++.SH REDIRECT
++When a handle already contains info about a URL, setting a relative URL will
++make it "redirect" to adapt to it.
++
++  rc = curl_url_set(h, CURLUPART_URL, "../test?another", 0);
++.SH "GET URL"
++The `CURLU` handle represents a URL and you can easily extract that with
++\fIcurl_url_get(3)\fP:
++
++  char *url;
++  rc = curl_url_get(h, CURLUPART_URL, &url, 0);
++  curl_free(url);
++
++The zero in the fourth argument is a bitmask for changing specific features.
++.SH "GET PARTS"
++When a URL has been parsed or parts have been set, you can extract those
++pieces from the handle at any time.
++
++.nf
++  rc = curl_url_get(h, CURLUPART_HOST, &host, 0);
++  rc = curl_url_get(h, CURLUPART_SCHEME, &scheme, 0);
++  rc = curl_url_get(h, CURLUPART_USER, &user, 0);
++  rc = curl_url_get(h, CURLUPART_PASSWORD, &password, 0);
++  rc = curl_url_get(h, CURLUPART_PORT, &port, 0);
++  rc = curl_url_get(h, CURLUPART_PATH, &path, 0);
++  rc = curl_url_get(h, CURLUPART_QUERY, &query, 0);
++  rc = curl_url_get(h, CURLUPART_FRAGMENT, &fragment, 0);
++.fi
++
++Extracted parts are not URL decoded unless the user also asks for it with the
++CURLU_URLDECODE flag set in the fourth bitmask argument.
++
++Remember to free the returned string with \fIcurl_free(3)\fP when you're done
++with it!
++.SH "SET PARTS"
++A user can set individual URL parts, either after having parsed a full URL
++or instead of parsing one.
++
++.nf
++  rc = curl_url_set(urlp, CURLUPART_HOST, "www.example.com", 0);
++  rc = curl_url_set(urlp, CURLUPART_SCHEME, "https", 0);
++  rc = curl_url_set(urlp, CURLUPART_USER, "john", 0);
++  rc = curl_url_set(urlp, CURLUPART_PASSWORD, "doe", 0);
++  rc = curl_url_set(urlp, CURLUPART_PORT, "443", 0);
++  rc = curl_url_set(urlp, CURLUPART_PATH, "/index.html", 0);
++  rc = curl_url_set(urlp, CURLUPART_QUERY, "name=john", 0);
++  rc = curl_url_set(urlp, CURLUPART_FRAGMENT, "anchor", 0);
++.fi
++
++Set parts are not URL encoded unless the user asks for it with the
++`CURLU_URLENCODE` flag.
++.SH "APPENDQUERY"
++An application can append a string to the right end of the query part with the
++`CURLU_APPENDQUERY` flag to \fIcurl_url_set(3)\fP.
++
++Imagine a handle that holds the URL `https://example.com/?shoes=2`. An
++application can then add the string `hat=1` to the query part like this:
++
++.nf
++  rc = curl_url_set(urlp, CURLUPART_QUERY, "hat=1", CURLU_APPENDQUERY);
++.fi
++
++It will even notice the lack of an ampersand (`&`) separator so it will inject
++one too, and the handle's full URL will then equal
++`https://example.com/?shoes=2&hat=1`.
++
++The appended string can of course also get URL encoded on add, and if asked to
++URL encode, the encoding process will skip the '=' character. For example,
++append `candy=N&N` to what we already have, and URL encode it to deal with the
++ampersand in the data:
++
++.nf
++  rc = curl_url_set(urlp, CURLUPART_QUERY, "candy=N&N",
++                    CURLU_APPENDQUERY | CURLU_URLENCODE);
++.fi
++
++Now the URL looks like
++.nf
++  https://example.com/?shoes=2&hat=1&candy=N%26N
++.fi
++.SH "SEE ALSO"
++.BR curl_url "(3), " curl_url_cleanup "(3), " curl_url_get "(3), "
++.BR curl_url_dup "(3), " curl_url_set "(3), " CURLOPT_URL "(3), "
+-- 
+2.17.2
+
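+A compact, self-contained sketch that ties the steps from the overview above
+together (parse, follow a relative redirect, read back one part); the URLs
+are illustrative:
+
+  #include <stdio.h>
+  #include <curl/curl.h>
+
+  int main(void)
+  {
+    CURLU *h = curl_url();
+    char *host = NULL;
+    if(!curl_url_set(h, CURLUPART_URL,
+                     "https://example.com:449/foo/bar?name=moo", 0) &&
+       !curl_url_set(h, CURLUPART_URL, "../test?another", 0) &&
+       !curl_url_get(h, CURLUPART_HOST, &host, 0)) {
+      printf("host: %s\n", host); /* prints "host: example.com" */
+      curl_free(host);
+    }
+    curl_url_cleanup(h);
+    return 0;
+  }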
+
+From 4c235b460cf40f8ce0c6ad06b44ecb4dddc128e4 Mon Sep 17 00:00:00 2001
+From: Dave Reisner <dreisner@archlinux.org>
+Date: Mon, 10 Sep 2018 09:38:46 -0400
+Subject: [PATCH 07/14] curl_url_set.3: fix typo in reference to
+ CURLU_APPENDQUERY
+
+Upstream-commit: 04110573801feb2f278e2f774087a0525d5e8d0a
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/curl_url_set.3 | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/docs/libcurl/curl_url_set.3 b/docs/libcurl/curl_url_set.3
+index 0d6e9aa..b2b273f 100644
+--- a/docs/libcurl/curl_url_set.3
++++ b/docs/libcurl/curl_url_set.3
+@@ -75,7 +75,7 @@ If used in with \fICURLU_APPENDQUERY\fP, the provided part will be appended on
+ the end of the existing query - and if the previous part didn't end with an
+ ampersand (&), an ampersand will be inserted before the new appended part.
+ 
+-When \fCURLU_APPENDQUERY\fP is used together with \fICURLU_URLENCODE\fP,
++When \fICURLU_APPENDQUERY\fP is used together with \fICURLU_URLENCODE\fP,
+ the '=' symbols will not be URL encoded.
+ 
+ The question mark in the URL is not part of the actual query contents.
+-- 
+2.17.2
+
+
+From fb07ea0cf9c612b2fad6a113b1d40aa7896fe43a Mon Sep 17 00:00:00 2001
+From: Dave Reisner <dreisner@archlinux.org>
+Date: Mon, 10 Sep 2018 09:39:33 -0400
+Subject: [PATCH 08/14] curl_url_set.3: properly escape \n in example code
+
+This yields
+
+  "the scheme is %s\n"
+
+instead of
+
+  "the scheme is %s0
+
+Closes #2970
+
+Upstream-commit: c1e5980f6672a2bd2d26894f093b435f2deb04e0
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/curl_url_get.3 | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/docs/libcurl/curl_url_get.3 b/docs/libcurl/curl_url_get.3
+index b1313ea..53f7954 100644
+--- a/docs/libcurl/curl_url_get.3
++++ b/docs/libcurl/curl_url_get.3
+@@ -97,7 +97,7 @@ If this function returns an error, no URL part is returned.
+     char *scheme;
+     rc = curl_url_get(url, CURLUPART_SCHEME, &scheme, 0);
+     if(!rc) {
+-      printf("the scheme is %s\n", scheme);
++      printf("the scheme is %s\\n", scheme);
+       curl_free(scheme);
+     }
+     curl_url_cleanup(url);
+-- 
+2.17.2
+
+
+From 376ae7de5a5a5f5b5513e6055700d010f21d4da3 Mon Sep 17 00:00:00 2001
+From: Daniel Gustafsson <daniel@yesql.se>
+Date: Wed, 19 Sep 2018 13:44:10 +0200
+Subject: [PATCH 09/14] urlapi: don't set value which is never read
+
+In the CURLUPART_URL case, there is no code path which invokes URL
+decoding, so remove the assignment of the urldecode variable. This
+fixes the dead-store bug report from clang static analysis.
+
+Closes #3015
+Reviewed-by: Daniel Stenberg <daniel@haxx.se>
+
+Upstream-commit: 522e647cc52c45ebdb58d57f242204f9a72c45dd
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/urlapi.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/lib/urlapi.c b/lib/urlapi.c
+index 3183598..127f390 100644
+--- a/lib/urlapi.c
++++ b/lib/urlapi.c
+@@ -970,7 +970,6 @@ CURLUcode curl_url_get(CURLU *u, CURLUPart what,
+     char *scheme;
+     char *options = u->options;
+     char *port = u->port;
+-    urldecode = FALSE; /* not for the whole thing */
+     if(u->scheme && strcasecompare("file", u->scheme)) {
+       url = aprintf("file://%s%s%s",
+                     u->path,
+-- 
+2.17.2
+
+
+From 26dd137f3ca894e6402a98889d3b182f608d3c7f Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 19 Sep 2018 10:17:03 +0200
+Subject: [PATCH 10/14] urlapi: add CURLU_GUESS_SCHEME and fix hostname
+ acceptance
+
+In order for this API to fully work for libcurl itself, it now offers a
+CURLU_GUESS_SCHEME flag that makes it "guess" scheme based on the host
+name prefix just like libcurl always did. If there's no known prefix, it
+will guess "http://".
+
+Separately, it relaxes the check of the host name so that IDN host names
+can be passed in as well.
+
+Both these changes are necessary for libcurl itself to use this API.
+
+Assisted-by: Daniel Gustafsson
+Closes #3018
+
+Upstream-commit: 9307c219ad4741db860b864c860ac2f8bf9fad9d
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/curl_url_set.3 | 10 ++++++++
+ include/curl/urlapi.h       |  1 +
+ lib/urlapi.c                | 48 ++++++++++++++++++++++++++++---------
+ tests/data/test1560         |  6 +++++
+ tests/libtest/lib1560.c     | 26 +++++++++++++++++++-
+ 5 files changed, 79 insertions(+), 12 deletions(-)
+
+diff --git a/docs/libcurl/curl_url_set.3 b/docs/libcurl/curl_url_set.3
+index b2b273f..95b76bd 100644
+--- a/docs/libcurl/curl_url_set.3
++++ b/docs/libcurl/curl_url_set.3
+@@ -96,6 +96,16 @@ The query part gets space-to-plus conversion before the URL conversion.
+ 
+ This URL encoding is charset unaware and will convert the input on a
+ byte-by-byte manner.
++.IP CURLU_DEFAULT_SCHEME
++If set, will make libcurl allow the URL to be set without a scheme and then
++sets that to the default scheme: HTTPS. Overrides the \fICURLU_GUESS_SCHEME\fP
++option if both are set.
++.IP CURLU_GUESS_SCHEME
++If set, will make libcurl allow the URL to be set without a scheme and it
++instead "guesses" which scheme was intended based on the host name. If the
++outermost sub-domain name matches DICT, FTP, IMAP, LDAP, POP3 or SMTP then
++that scheme will be used, otherwise it picks HTTP. Conflicts with the
++\fICURLU_DEFAULT_SCHEME\fP option which takes precedence if both are set.
+ .SH RETURN VALUE
+ Returns a CURLUcode error value, which is CURLUE_OK (0) if everything went
+ fine.
+diff --git a/include/curl/urlapi.h b/include/curl/urlapi.h
+index b16cfce..319de35 100644
+--- a/include/curl/urlapi.h
++++ b/include/curl/urlapi.h
+@@ -75,6 +75,7 @@ typedef enum {
+ #define CURLU_URLDECODE (1<<6)          /* URL decode on get */
+ #define CURLU_URLENCODE (1<<7)          /* URL encode on set */
+ #define CURLU_APPENDQUERY (1<<8)        /* append a form style part */
++#define CURLU_GUESS_SCHEME (1<<9)       /* legacy curl-style guessing */
+ 
+ typedef struct Curl_URL CURLU;
+ 
+diff --git a/lib/urlapi.c b/lib/urlapi.c
+index 127f390..45f1e14 100644
+--- a/lib/urlapi.c
++++ b/lib/urlapi.c
+@@ -554,7 +554,7 @@ static CURLUcode junkscan(char *part)
+ 
+ static CURLUcode hostname_check(char *hostname, unsigned int flags)
+ {
+-  const char *l; /* accepted characters */
++  const char *l = NULL; /* accepted characters */
+   size_t len;
+   size_t hlen = strlen(hostname);
+   (void)flags;
+@@ -564,14 +564,21 @@ static CURLUcode hostname_check(char *hostname, unsigned int flags)
+     l = "0123456789abcdefABCDEF::.";
+     hlen -= 2;
+   }
+-  else /* % for URL escaped letters */
+-    l = "0123456789abcdefghijklimnopqrstuvwxyz-_.ABCDEFGHIJKLIMNOPQRSTUVWXYZ%";
+-
+-  len = strspn(hostname, l);
+-  if(hlen != len)
+-    /* hostname with bad content */
+-    return CURLUE_MALFORMED_INPUT;
+ 
++  if(l) {
++    /* only valid letters are ok */
++    len = strspn(hostname, l);
++    if(hlen != len)
++      /* hostname with bad content */
++      return CURLUE_MALFORMED_INPUT;
++  }
++  else {
++    /* characters from the second string are not ok */
++    len = strcspn(hostname, " ");
++    if(hlen != len)
++      /* hostname with bad content */
++      return CURLUE_MALFORMED_INPUT;
++  }
+   return CURLUE_OK;
+ }
+ 
+@@ -587,7 +594,7 @@ static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
+   CURLUcode result;
+   bool url_has_scheme = FALSE;
+   char schemebuf[MAX_SCHEME_LEN];
+-  char *schemep;
++  char *schemep = NULL;
+   size_t schemelen = 0;
+   size_t urllen;
+   const struct Curl_handler *h = NULL;
+@@ -723,9 +730,10 @@ static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
+     else {
+       /* no scheme! */
+ 
+-      if(!(flags & CURLU_DEFAULT_SCHEME))
++      if(!(flags & (CURLU_DEFAULT_SCHEME|CURLU_GUESS_SCHEME)))
+         return CURLUE_MALFORMED_INPUT;
+-      schemep = (char *) DEFAULT_SCHEME;
++      if(flags & CURLU_DEFAULT_SCHEME)
++        schemep = (char *) DEFAULT_SCHEME;
+ 
+       /*
+        * The URL was badly formatted, let's try without scheme specified.
+@@ -744,6 +752,24 @@ static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
+     memcpy(hostname, hostp, len);
+     hostname[len] = 0;
+ 
++    if((flags & CURLU_GUESS_SCHEME) && !schemep) {
++      /* legacy curl-style guess based on host name */
++      if(checkprefix("ftp.", hostname))
++        schemep = (char *)"ftp";
++      else if(checkprefix("dict.", hostname))
++        schemep = (char *)"dict";
++      else if(checkprefix("ldap.", hostname))
++        schemep = (char *)"ldap";
++      else if(checkprefix("imap.", hostname))
++        schemep = (char *)"imap";
++      else if(checkprefix("smtp.", hostname))
++        schemep = (char *)"smtp";
++      else if(checkprefix("pop3.", hostname))
++        schemep = (char *)"pop3";
++      else
++        schemep = (char *)"http";
++    }
++
+     len = strlen(p);
+     memcpy(path, p, len);
+     path[len] = 0;
+diff --git a/tests/data/test1560 b/tests/data/test1560
+index 720df03..4b6c97a 100644
+--- a/tests/data/test1560
++++ b/tests/data/test1560
+@@ -16,6 +16,12 @@ none
+ file
+ https
+ http
++pop3
++smtp
++imap
++ldap
++dict
++ftp
+ </features>
+  <name>
+ URL API
+diff --git a/tests/libtest/lib1560.c b/tests/libtest/lib1560.c
+index 669ea9a..30fb582 100644
+--- a/tests/libtest/lib1560.c
++++ b/tests/libtest/lib1560.c
+@@ -246,8 +246,32 @@ static struct testcase get_parts_list[] ={
+ };
+ 
+ static struct urltestcase get_url_list[] = {
++  {"smtp.example.com/path/html",
++   "smtp://smtp.example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
++  {"https.example.com/path/html",
++   "http://https.example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
++  {"dict.example.com/path/html",
++   "dict://dict.example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
++  {"pop3.example.com/path/html",
++   "pop3://pop3.example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
++  {"ldap.example.com/path/html",
++   "ldap://ldap.example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
++  {"imap.example.com/path/html",
++   "imap://imap.example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
++  {"ftp.example.com/path/html",
++   "ftp://ftp.example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
++  {"example.com/path/html",
++   "http://example.com/path/html",
++   CURLU_GUESS_SCHEME, 0, CURLUE_OK},
+   {"HTTP://test/", "http://test/", 0, 0, CURLUE_OK},
+-  {"http://HO0_-st..~./", "", 0, 0, CURLUE_MALFORMED_INPUT},
++  {"http://HO0_-st..~./", "http://HO0_-st..~./", 0, 0, CURLUE_OK},
+   {"http:/@example.com: 123/", "", 0, 0, CURLUE_BAD_PORT_NUMBER},
+   {"http:/@example.com:123 /", "", 0, 0, CURLUE_BAD_PORT_NUMBER},
+   {"http:/@example.com:123a/", "", 0, 0, CURLUE_BAD_PORT_NUMBER},
+-- 
+2.17.2
+
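+A short sketch of the guessing behaviour for a scheme-less URL, assuming
+<curl/curl.h> is included; the host name is illustrative:
+
+  CURLU *u = curl_url();
+  char *url = NULL;
+  if(!curl_url_set(u, CURLUPART_URL, "ftp.example.com/file.txt",
+                   CURLU_GUESS_SCHEME) &&
+     !curl_url_get(u, CURLUPART_URL, &url, 0)) {
+    /* url is "ftp://ftp.example.com/file.txt"; an unrecognized host
+       prefix would have produced "http://..." instead */
+    curl_free(url);
+  }
+  curl_url_cleanup(u);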
+
+From 4e335817d4ac0ee5596363004bfcaaad15bc6127 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 19 Sep 2018 11:28:40 +0200
+Subject: [PATCH 11/14] urlapi: document the error codes, remove two unused
+ ones
+
+Assisted-by: Daniel Gustafsson
+Closes #3019
+
+Upstream-commit: 5c73093edb3bd527db9c8abdee53d0f18e6a4cc1
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ docs/libcurl/libcurl-errors.3    | 37 ++++++++++++++++++++++++++++-
+ docs/libcurl/symbols-in-versions |  2 --
+ include/curl/urlapi.h            |  8 +++----
+ tests/libtest/lib1560.c          | 40 ++++++++++++++++----------------
+ 4 files changed, 59 insertions(+), 28 deletions(-)
+
+diff --git a/docs/libcurl/libcurl-errors.3 b/docs/libcurl/libcurl-errors.3
+index 30c57b3..411a272 100644
+--- a/docs/libcurl/libcurl-errors.3
++++ b/docs/libcurl/libcurl-errors.3
+@@ -5,7 +5,7 @@
+ .\" *                            | (__| |_| |  _ <| |___
+ .\" *                             \___|\___/|_| \_\_____|
+ .\" *
+-.\" * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
++.\" * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+ .\" *
+ .\" * This software is licensed as described in the file COPYING, which
+ .\" * you should have received as part of this distribution. The terms
+@@ -307,6 +307,41 @@ Not enough memory was available.
+ .IP "CURLSHE_NOT_BUILT_IN (5)"
+ The requested sharing could not be done because the library you use don't have
+ that particular feature enabled. (Added in 7.23.0)
++.SH "CURLUcode"
++.IP "CURLUE_BAD_HANDLE (1)"
++An argument that should be a CURLU pointer was passed in as a NULL.
++.IP "CURLUE_BAD_PARTPOINTER (2)"
++A NULL pointer was passed to the 'part' argument of \fIcurl_url_get(3)\fP.
++.IP "CURLUE_MALFORMED_INPUT (3)"
++A malformed input was passed to a URL API function.
++.IP "CURLUE_BAD_PORT_NUMBER (4)"
++The port number was not a decimal number between 0 and 65535.
++.IP "CURLUE_UNSUPPORTED_SCHEME (5)"
++This libcurl build doesn't support the given URL scheme.
++.IP "CURLUE_URLDECODE (6)"
++URL decode error, most likely because of rubbish in the input.
++.IP "CURLUE_OUT_OF_MEMORY (7)"
++A memory function failed.
++.IP "CURLUE_USER_NOT_ALLOWED (8)"
++Credentials was passed in the URL when prohibited.
++.IP "CURLUE_UNKNOWN_PART (9)"
++An unknown part ID was passed to a URL API function.
++.IP "CURLUE_NO_SCHEME (10)"
++There is no scheme part in the URL.
++.IP "CURLUE_NO_USER (11)"
++There is no user part in the URL.
++.IP "CURLUE_NO_PASSWORD (12)"
++There is no password part in the URL.
++.IP "CURLUE_NO_OPTIONS (13)"
++There is no options part in the URL.
++.IP "CURLUE_NO_HOST (14)"
++There is no host part in the URL.
++.IP "CURLUE_NO_PORT (15)"
++There is no port part in the URL.
++.IP "CURLUE_NO_QUERY (16)"
++There is no query part in the URL.
++.IP "CURLUE_NO_FRAGMENT (17)"
++There is no fragment part in the URL.
+ .SH "SEE ALSO"
+ .BR curl_easy_strerror "(3), " curl_multi_strerror "(3), "
+ .BR curl_share_strerror "(3), " CURLOPT_ERRORBUFFER "(3), "
+diff --git a/docs/libcurl/symbols-in-versions b/docs/libcurl/symbols-in-versions
+index c797cb7..3b3861f 100644
+--- a/docs/libcurl/symbols-in-versions
++++ b/docs/libcurl/symbols-in-versions
+@@ -736,14 +736,12 @@ CURLUE_NO_FRAGMENT              7.62.0
+ CURLUE_NO_HOST                  7.62.0
+ CURLUE_NO_OPTIONS               7.62.0
+ CURLUE_NO_PASSWORD              7.62.0
+-CURLUE_NO_PATH                  7.62.0
+ CURLUE_NO_PORT                  7.62.0
+ CURLUE_NO_QUERY                 7.62.0
+ CURLUE_NO_SCHEME                7.62.0
+ CURLUE_NO_USER                  7.62.0
+ CURLUE_OK                       7.62.0
+ CURLUE_OUT_OF_MEMORY            7.62.0
+-CURLUE_RELATIVE                 7.62.0
+ CURLUE_UNKNOWN_PART             7.62.0
+ CURLUE_UNSUPPORTED_SCHEME       7.62.0
+ CURLUE_URLDECODE                7.62.0
+diff --git a/include/curl/urlapi.h b/include/curl/urlapi.h
+index 319de35..90dd56c 100644
+--- a/include/curl/urlapi.h
++++ b/include/curl/urlapi.h
+@@ -35,7 +35,7 @@ typedef enum {
+   CURLUE_BAD_PORT_NUMBER,     /* 4 */
+   CURLUE_UNSUPPORTED_SCHEME,  /* 5 */
+   CURLUE_URLDECODE,           /* 6 */
+-  CURLUE_RELATIVE,            /* 7 */
++  CURLUE_OUT_OF_MEMORY,       /* 7 */
+   CURLUE_USER_NOT_ALLOWED,    /* 8 */
+   CURLUE_UNKNOWN_PART,        /* 9 */
+   CURLUE_NO_SCHEME,           /* 10 */
+@@ -44,10 +44,8 @@ typedef enum {
+   CURLUE_NO_OPTIONS,          /* 13 */
+   CURLUE_NO_HOST,             /* 14 */
+   CURLUE_NO_PORT,             /* 15 */
+-  CURLUE_NO_PATH,             /* 16 */
+-  CURLUE_NO_QUERY,            /* 17 */
+-  CURLUE_NO_FRAGMENT,         /* 18 */
+-  CURLUE_OUT_OF_MEMORY        /* 19 */
++  CURLUE_NO_QUERY,            /* 16 */
++  CURLUE_NO_FRAGMENT          /* 17 */
+ } CURLUcode;
+ 
+ typedef enum {
+diff --git a/tests/libtest/lib1560.c b/tests/libtest/lib1560.c
+index 30fb582..224cb88 100644
+--- a/tests/libtest/lib1560.c
++++ b/tests/libtest/lib1560.c
+@@ -129,7 +129,7 @@ struct querycase {
+ 
+ static struct testcase get_parts_list[] ={
+   {"https://127.0.0.1:443",
+-   "https | [11] | [12] | [13] | 127.0.0.1 | [15] | / | [17] | [18]",
++   "https | [11] | [12] | [13] | 127.0.0.1 | [15] | / | [16] | [17]",
+    0, CURLU_NO_DEFAULT_PORT, CURLUE_OK},
+   {"http://%3a:%3a@ex%0ample/%3f+?+%3f+%23#+%23%3f%g7",
+    "http | : | : | [13] | [6] | [15] | /?+ |  ? # | +#?%g7",
+@@ -138,43 +138,43 @@ static struct testcase get_parts_list[] ={
+    "http | %3a | %3a | [13] | ex%0ample | [15] | /%3f | %3f%35 | %35%3f%g7",
+    0, 0, CURLUE_OK},
+   {"http://HO0_-st%41/",
+-   "http | [11] | [12] | [13] | HO0_-st%41 | [15] | / | [17] | [18]",
++   "http | [11] | [12] | [13] | HO0_-st%41 | [15] | / | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"file://hello.html",
+    "",
+    0, 0, CURLUE_MALFORMED_INPUT},
+   {"http://HO0_-st/",
+-   "http | [11] | [12] | [13] | HO0_-st | [15] | / | [17] | [18]",
++   "http | [11] | [12] | [13] | HO0_-st | [15] | / | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"imap://user:pass;option@server/path",
+-   "imap | user | pass | option | server | [15] | /path | [17] | [18]",
++   "imap | user | pass | option | server | [15] | /path | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"http://user:pass;option@server/path",
+-   "http | user | pass;option | [13] | server | [15] | /path | [17] | [18]",
++   "http | user | pass;option | [13] | server | [15] | /path | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"file:/hello.html",
+-   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [17] | [18]",
++   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"file://127.0.0.1/hello.html",
+-   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [17] | [18]",
++   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"file:////hello.html",
+-   "file | [11] | [12] | [13] | [14] | [15] | //hello.html | [17] | [18]",
++   "file | [11] | [12] | [13] | [14] | [15] | //hello.html | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"file:///hello.html",
+-   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [17] | [18]",
++   "file | [11] | [12] | [13] | [14] | [15] | /hello.html | [16] | [17]",
+    0, 0, CURLUE_OK},
+   {"https://127.0.0.1",
+-   "https | [11] | [12] | [13] | 127.0.0.1 | 443 | / | [17] | [18]",
++   "https | [11] | [12] | [13] | 127.0.0.1 | 443 | / | [16] | [17]",
+    0, CURLU_DEFAULT_PORT, CURLUE_OK},
+   {"https://127.0.0.1",
+-   "https | [11] | [12] | [13] | 127.0.0.1 | [15] | / | [17] | [18]",
++   "https | [11] | [12] | [13] | 127.0.0.1 | [15] | / | [16] | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"https://[::1]:1234",
+-   "https | [11] | [12] | [13] | [::1] | 1234 | / | [17] | [18]",
++   "https | [11] | [12] | [13] | [::1] | 1234 | / | [16] | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"https://127abc.com",
+-   "https | [11] | [12] | [13] | 127abc.com | [15] | / | [17] | [18]",
++   "https | [11] | [12] | [13] | 127abc.com | [15] | / | [16] | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"https:// example.com?check",
+    "",
+@@ -183,7 +183,7 @@ static struct testcase get_parts_list[] ={
+    "",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_MALFORMED_INPUT},
+   {"https://example.com?check",
+-   "https | [11] | [12] | [13] | example.com | [15] | / | check | [18]",
++   "https | [11] | [12] | [13] | example.com | [15] | / | check | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"https://example.com:65536",
+    "",
+@@ -193,27 +193,27 @@ static struct testcase get_parts_list[] ={
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_BAD_PORT_NUMBER},
+   {"https://example.com:01#moo",
+    "https | [11] | [12] | [13] | example.com | 1 | / | "
+-   "[17] | moo",
++   "[16] | moo",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"https://example.com:1#moo",
+    "https | [11] | [12] | [13] | example.com | 1 | / | "
+-   "[17] | moo",
++   "[16] | moo",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"http://example.com#moo",
+    "http | [11] | [12] | [13] | example.com | [15] | / | "
+-   "[17] | moo",
++   "[16] | moo",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"http://example.com",
+    "http | [11] | [12] | [13] | example.com | [15] | / | "
+-   "[17] | [18]",
++   "[16] | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"http://example.com/path/html",
+    "http | [11] | [12] | [13] | example.com | [15] | /path/html | "
+-   "[17] | [18]",
++   "[16] | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"http://example.com/path/html?query=name",
+    "http | [11] | [12] | [13] | example.com | [15] | /path/html | "
+-   "query=name | [18]",
++   "query=name | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"http://example.com/path/html?query=name#anchor",
+    "http | [11] | [12] | [13] | example.com | [15] | /path/html | "
+-- 
+2.17.2
+
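+A small sketch of checking these codes from application code, assuming
+<curl/curl.h> and <stdio.h> are included; the out-of-range port is deliberate:
+
+  CURLU *u = curl_url();
+  CURLUcode rc = curl_url_set(u, CURLUPART_URL,
+                              "https://example.com:99999/", 0);
+  if(rc == CURLUE_BAD_PORT_NUMBER)
+    fprintf(stderr, "port number out of range\n");
+  else if(rc != CURLUE_OK)
+    fprintf(stderr, "URL rejected: %d\n", (int)rc);
+  curl_url_cleanup(u);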
+
+From 88dfdac2fc1b34a321a323868ea06116c72fe6d2 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 21 Sep 2018 08:17:39 +0200
+Subject: [PATCH 12/14] urlapi: fix support for address scope in IPv6 numerical
+ addresses
+
+Closes #3024
+
+Upstream-commit: 2097cd515289581df5dfb6eeb5942d083a871fa4
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/urlapi-int.h        | 4 ++++
+ lib/urlapi.c            | 8 ++------
+ tests/libtest/lib1560.c | 3 +++
+ 3 files changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/lib/urlapi-int.h b/lib/urlapi-int.h
+index 7ac09fd..a5bb8ea 100644
+--- a/lib/urlapi-int.h
++++ b/lib/urlapi-int.h
+@@ -22,6 +22,10 @@
+  *
+  ***************************************************************************/
+ #include "curl_setup.h"
++/* scheme is not URL encoded, the longest libcurl supported ones are 6
++   letters */
++#define MAX_SCHEME_LEN 8
++
+ bool Curl_is_absolute_url(const char *url, char *scheme, size_t buflen);
+ char *Curl_concat_url(const char *base, const char *relurl);
+ size_t Curl_strlen_url(const char *url, bool relative);
+diff --git a/lib/urlapi.c b/lib/urlapi.c
+index 45f1e14..a12112e 100644
+--- a/lib/urlapi.c
++++ b/lib/urlapi.c
+@@ -53,10 +53,6 @@ struct Curl_URL {
+ 
+ #define DEFAULT_SCHEME "https"
+ 
+-/* scheme is not URL encoded, the longest libcurl supported ones are 6
+-   letters */
+-#define MAX_SCHEME_LEN 8
+-
+ static void free_urlhandle(struct Curl_URL *u)
+ {
+   free(u->scheme);
+@@ -480,7 +476,7 @@ static CURLUcode parse_port(struct Curl_URL *u, char *hostname)
+   char endbracket;
+   int len;
+ 
+-  if((1 == sscanf(hostname, "[%*45[0123456789abcdefABCDEF:.]%c%n",
++  if((1 == sscanf(hostname, "[%*45[0123456789abcdefABCDEF:.%%]%c%n",
+                   &endbracket, &len)) &&
+      (']' == endbracket)) {
+     /* this is a RFC2732-style specified IP-address */
+@@ -561,7 +557,7 @@ static CURLUcode hostname_check(char *hostname, unsigned int flags)
+ 
+   if(hostname[0] == '[') {
+     hostname++;
+-    l = "0123456789abcdefABCDEF::.";
++    l = "0123456789abcdefABCDEF::.%";
+     hlen -= 2;
+   }
+ 
+diff --git a/tests/libtest/lib1560.c b/tests/libtest/lib1560.c
+index 224cb88..7a5be81 100644
+--- a/tests/libtest/lib1560.c
++++ b/tests/libtest/lib1560.c
+@@ -128,6 +128,9 @@ struct querycase {
+ };
+ 
+ static struct testcase get_parts_list[] ={
++  {"https://[::1%252]:1234",
++   "https | [11] | [12] | [13] | [::1%252] | 1234 | / | [16] | [17]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"https://127.0.0.1:443",
+    "https | [11] | [12] | [13] | 127.0.0.1 | [15] | / | [16] | [17]",
+    0, CURLU_NO_DEFAULT_PORT, CURLUE_OK},
+-- 
+2.17.2
+
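+A sketch of the bracketed zone-id form this change accepts, reusing the same
+numeric zone as the test case above (at this point the parser only allows hex
+digits, ':', '.' and '%' inside the brackets, and the '%' separating address
+and zone must itself arrive URL encoded as %25); assumes <curl/curl.h>:
+
+  CURLU *u = curl_url();
+  char *host = NULL;
+  if(!curl_url_set(u, CURLUPART_URL, "https://[::1%252]:1234/", 0) &&
+     !curl_url_get(u, CURLUPART_HOST, &host, 0)) {
+    /* host comes back as "[::1%252]", the zone id kept in encoded form */
+    curl_free(host);
+  }
+  curl_url_cleanup(u);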
+
+From 6c9f3f4bc604ba06a4f43807ace9189503a5e9fc Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 2 Nov 2018 15:11:16 +0100
+Subject: [PATCH 13/14] URL: fix IPv6 numeral address parser
+
+Regression from 46e164069d1a52. Extended test 1560 to verify.
+
+Reported-by: tpaukrt on github
+Fixes #3218
+Closes #3219
+
+Upstream-commit: b28094833a971870fd8c07960b3b12bf6fbbaad3
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/urlapi.c            | 8 ++++++--
+ tests/libtest/lib1560.c | 9 +++++++++
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/lib/urlapi.c b/lib/urlapi.c
+index a12112e..8626052 100644
+--- a/lib/urlapi.c
++++ b/lib/urlapi.c
+@@ -481,8 +481,12 @@ static CURLUcode parse_port(struct Curl_URL *u, char *hostname)
+      (']' == endbracket)) {
+     /* this is a RFC2732-style specified IP-address */
+     portptr = &hostname[len];
+-    if (*portptr != ':')
+-      return CURLUE_MALFORMED_INPUT;
++    if(*portptr) {
++      if(*portptr != ':')
++        return CURLUE_MALFORMED_INPUT;
++    }
++    else
++      portptr = NULL;
+   }
+   else
+     portptr = strchr(hostname, ':');
+diff --git a/tests/libtest/lib1560.c b/tests/libtest/lib1560.c
+index 7a5be81..483035c 100644
+--- a/tests/libtest/lib1560.c
++++ b/tests/libtest/lib1560.c
+@@ -128,6 +128,15 @@ struct querycase {
+ };
+ 
+ static struct testcase get_parts_list[] ={
++  {"http://[fd00:a41::50]:8080",
++   "http | [11] | [12] | [13] | [fd00:a41::50] | 8080 | / | [16] | [17]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://[fd00:a41::50]/",
++   "http | [11] | [12] | [13] | [fd00:a41::50] | [15] | / | [16] | [17]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
++  {"http://[fd00:a41::50]",
++   "http | [11] | [12] | [13] | [fd00:a41::50] | [15] | / | [16] | [17]",
++   CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+   {"https://[::1%252]:1234",
+    "https | [11] | [12] | [13] | [::1%252] | 1234 | / | [16] | [17]",
+    CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
+-- 
+2.17.2
+
+
+From 9fa7298750c1d66331dc55a202277b131868c048 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 2 Jan 2019 20:18:27 +0100
+Subject: [PATCH 14/14] xattr: strip credentials from any URL that is stored
+
+Both user and password are cleared unconditionally.
+
+Added unit test 1621 to verify.
+
+Fixes #3423
+Closes #3433
+
+Upstream-commit: 98e6629154044e4ab1ee7cff8351c7ebcb131e88
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ src/tool_xattr.c        | 63 +++++++++++++++++++++++++----
+ tests/data/Makefile.inc |  2 +-
+ tests/data/test1621     | 27 +++++++++++++
+ tests/unit/Makefile.inc |  6 ++-
+ tests/unit/unit1621.c   | 89 +++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 177 insertions(+), 10 deletions(-)
+ create mode 100644 tests/data/test1621
+ create mode 100644 tests/unit/unit1621.c
+
+diff --git a/src/tool_xattr.c b/src/tool_xattr.c
+index 92b99db..730381b 100644
+--- a/src/tool_xattr.c
++++ b/src/tool_xattr.c
+@@ -5,7 +5,7 @@
+  *                            | (__| |_| |  _ <| |___
+  *                             \___|\___/|_| \_\_____|
+  *
+- * Copyright (C) 1998 - 2014, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+  *
+  * This software is licensed as described in the file COPYING, which
+  * you should have received as part of this distribution. The terms
+@@ -49,6 +49,46 @@ static const struct xattr_mapping {
+   { NULL,                  CURLINFO_NONE } /* last element, abort loop here */
+ };
+ 
++/* returns TRUE if a new URL is returned, that then needs to be freed */
++/* @unittest: 1621 */
++#ifdef UNITTESTS
++bool stripcredentials(char **url);
++#else
++static
++#endif
++bool stripcredentials(char **url)
++{
++  CURLU *u;
++  CURLUcode uc;
++  char *nurl;
++  u = curl_url();
++  if(u) {
++    uc = curl_url_set(u, CURLUPART_URL, *url, 0);
++    if(uc)
++      goto error;
++
++    uc = curl_url_set(u, CURLUPART_USER, NULL, 0);
++    if(uc)
++      goto error;
++
++    uc = curl_url_set(u, CURLUPART_PASSWORD, NULL, 0);
++    if(uc)
++      goto error;
++
++    uc = curl_url_get(u, CURLUPART_URL, &nurl, 0);
++    if(uc)
++      goto error;
++
++    curl_url_cleanup(u);
++
++    *url = nurl;
++    return TRUE;
++  }
++  error:
++  curl_url_cleanup(u);
++  return FALSE;
++}
++
+ /* store metadata from the curl request alongside the downloaded
+  * file using extended attributes
+  */
+@@ -62,17 +102,24 @@ int fwrite_xattr(CURL *curl, int fd)
+     char *value = NULL;
+     CURLcode result = curl_easy_getinfo(curl, mappings[i].info, &value);
+     if(!result && value) {
++      bool freeptr = FALSE;
++      if(CURLINFO_EFFECTIVE_URL == mappings[i].info)
++        freeptr = stripcredentials(&value);
++      if(value) {
+ #ifdef HAVE_FSETXATTR_6
+-      err = fsetxattr(fd, mappings[i].attr, value, strlen(value), 0, 0);
++        err = fsetxattr(fd, mappings[i].attr, value, strlen(value), 0, 0);
+ #elif defined(HAVE_FSETXATTR_5)
+-      err = fsetxattr(fd, mappings[i].attr, value, strlen(value), 0);
++        err = fsetxattr(fd, mappings[i].attr, value, strlen(value), 0);
+ #elif defined(__FreeBSD_version)
+-      err = extattr_set_fd(fd, EXTATTR_NAMESPACE_USER, mappings[i].attr, value,
+-                           strlen(value));
+-      /* FreeBSD's extattr_set_fd returns the length of the extended attribute
+-       */
+-      err = err < 0 ? err : 0;
++        err = extattr_set_fd(fd, EXTATTR_NAMESPACE_USER, mappings[i].attr,
++                             value, strlen(value));
++        /* FreeBSD's extattr_set_fd returns the length of the extended
++           attribute */
++        err = err < 0 ? err : 0;
+ #endif
++        if(freeptr)
++          curl_free(value);
++      }
+     }
+     i++;
+   }
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index dd38f89..6172b77 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -182,7 +182,7 @@ test1560 \
+ \
+ test1590 \
+ test1600 test1601 test1602 test1603 test1604 test1605 test1606 test1607 \
+-test1608 test1609 \
++test1608 test1609 test1621 \
+ \
+ test1700 test1701 test1702 \
+ \
+diff --git a/tests/data/test1621 b/tests/data/test1621
+new file mode 100644
+index 0000000..1117d1b
+--- /dev/null
++++ b/tests/data/test1621
+@@ -0,0 +1,27 @@
++<testcase>
++<info>
++<keywords>
++unittest
++stripcredentials
++</keywords>
++</info>
++
++#
++# Client-side
++<client>
++<server>
++none
++</server>
++<features>
++unittest
++https
++</features>
++ <name>
++unit tests for stripcredentials from URL
++ </name>
++<tool>
++unit1621
++</tool>
++</client>
++
++</testcase>
+diff --git a/tests/unit/Makefile.inc b/tests/unit/Makefile.inc
+index 8b1a607..82eaec7 100644
+--- a/tests/unit/Makefile.inc
++++ b/tests/unit/Makefile.inc
+@@ -10,7 +10,7 @@ UNITPROGS = unit1300 unit1301 unit1302 unit1303 unit1304 unit1305 unit1307	\
+  unit1330 unit1394 unit1395 unit1396 unit1397 unit1398	\
+  unit1399	\
+  unit1600 unit1601 unit1602 unit1603 unit1604 unit1605 unit1606 unit1607 \
+- unit1608 unit1609
++ unit1608 unit1609 unit1621
+ 
+ unit1300_SOURCES = unit1300.c $(UNITFILES)
+ unit1300_CPPFLAGS = $(AM_CPPFLAGS)
+@@ -95,3 +95,7 @@ unit1608_CPPFLAGS = $(AM_CPPFLAGS)
+ 
+ unit1609_SOURCES = unit1609.c $(UNITFILES)
+ unit1609_CPPFLAGS = $(AM_CPPFLAGS)
++
++unit1621_SOURCES = unit1621.c $(UNITFILES)
++unit1621_CPPFLAGS = $(AM_CPPFLAGS)
++unit1621_LDADD = $(top_builddir)/src/libcurltool.la $(top_builddir)/lib/libcurl.la
+diff --git a/tests/unit/unit1621.c b/tests/unit/unit1621.c
+new file mode 100644
+index 0000000..6e07b6e
+--- /dev/null
++++ b/tests/unit/unit1621.c
+@@ -0,0 +1,89 @@
++/***************************************************************************
++ *                                  _   _ ____  _
++ *  Project                     ___| | | |  _ \| |
++ *                             / __| | | | |_) | |
++ *                            | (__| |_| |  _ <| |___
++ *                             \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.haxx.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ ***************************************************************************/
++#include "curlcheck.h"
++
++#include "urldata.h"
++#include "url.h"
++
++#include "memdebug.h" /* LAST include file */
++
++static CURLcode unit_setup(void)
++{
++  return CURLE_OK;
++}
++
++static void unit_stop(void)
++{
++}
++
++#ifdef __MINGW32__
++UNITTEST_START
++{
++  return 0;
++}
++UNITTEST_STOP
++#else
++
++bool stripcredentials(char **url);
++
++struct checkthis {
++  const char *input;
++  const char *output;
++};
++
++static struct checkthis tests[] = {
++  { "ninja://foo@example.com", "ninja://foo@example.com" },
++  { "https://foo@example.com", "https://example.com/" },
++  { "https://localhost:45", "https://localhost:45/" },
++  { "https://foo@localhost:45", "https://localhost:45/" },
++  { "http://daniel:password@localhost", "http://localhost/" },
++  { "http://daniel@localhost", "http://localhost/" },
++  { "http://localhost/", "http://localhost/" },
++  { NULL, NULL } /* end marker */
++};
++
++UNITTEST_START
++{
++  bool cleanup;
++  char *url;
++  int i;
++  int rc = 0;
++
++  for(i = 0; tests[i].input; i++) {
++    url = (char *)tests[i].input;
++    cleanup = stripcredentials(&url);
++    printf("Test %u got input \"%s\", output: \"%s\"\n",
++           i, tests[i].input, url);
++
++    if(strcmp(tests[i].output, url)) {
++      fprintf(stderr, "Test %u got input \"%s\", expected output \"%s\"\n"
++              " Actual output: \"%s\"\n", i, tests[i].input, tests[i].output,
++              url);
++      rc++;
++    }
++    if(cleanup)
++      curl_free(url);
++  }
++  return rc;
++}
++UNITTEST_STOP
++#endif
+-- 
+2.17.2
+
diff --git a/SOURCES/0009-curl-7.61.1-CVE-2018-16890.patch b/SOURCES/0009-curl-7.61.1-CVE-2018-16890.patch
new file mode 100644
index 0000000..0a15ade
--- /dev/null
+++ b/SOURCES/0009-curl-7.61.1-CVE-2018-16890.patch
@@ -0,0 +1,36 @@
+From 81c0e81531623251a0e78f7779c049f530abe733 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 2 Jan 2019 20:33:08 +0100
+Subject: [PATCH] NTLM: fix size check condition for type2 received data
+
+Bug: https://curl.haxx.se/docs/CVE-2018-16890.html
+Reported-by: Wenxiang Qian
+CVE-2018-16890
+
+Upstream-commit: b780b30d1377adb10bbe774835f49e9b237fb9bb
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/vauth/ntlm.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/lib/vauth/ntlm.c b/lib/vauth/ntlm.c
+index cdb8d8f..b614cda 100644
+--- a/lib/vauth/ntlm.c
++++ b/lib/vauth/ntlm.c
+@@ -182,10 +182,11 @@ static CURLcode ntlm_decode_type2_target(struct Curl_easy *data,
+     target_info_len = Curl_read16_le(&buffer[40]);
+     target_info_offset = Curl_read32_le(&buffer[44]);
+     if(target_info_len > 0) {
+-      if(((target_info_offset + target_info_len) > size) ||
++      if((target_info_offset >= size) ||
++         ((target_info_offset + target_info_len) > size) ||
+          (target_info_offset < 48)) {
+         infof(data, "NTLM handshake failure (bad type-2 message). "
+-                    "Target Info Offset Len is set incorrect by the peer\n");
++              "Target Info Offset Len is set incorrect by the peer\n");
+         return CURLE_BAD_CONTENT_ENCODING;
+       }
+ 
+-- 
+2.17.2
+
diff --git a/SOURCES/0010-curl-7.61.1-CVE-2019-3822.patch b/SOURCES/0010-curl-7.61.1-CVE-2019-3822.patch
new file mode 100644
index 0000000..c860817
--- /dev/null
+++ b/SOURCES/0010-curl-7.61.1-CVE-2019-3822.patch
@@ -0,0 +1,41 @@
+From ab22e3a00f04b458039c21111cfa448051e5777d Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 3 Jan 2019 12:59:28 +0100
+Subject: [PATCH] ntlm: fix *_type3_message size check to avoid buffer overflow
+
+Bug: https://curl.haxx.se/docs/CVE-2019-3822.html
+Reported-by: Wenxiang Qian
+CVE-2019-3822
+
+Upstream-commit: 50c9484278c63b958655a717844f0721263939cc
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/vauth/ntlm.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/lib/vauth/ntlm.c b/lib/vauth/ntlm.c
+index b614cda..a3a55d9 100644
+--- a/lib/vauth/ntlm.c
++++ b/lib/vauth/ntlm.c
+@@ -777,11 +777,14 @@ CURLcode Curl_auth_create_ntlm_type3_message(struct Curl_easy *data,
+   });
+ 
+ #ifdef USE_NTRESPONSES
+-  if(size < (NTLM_BUFSIZE - ntresplen)) {
+-    DEBUGASSERT(size == (size_t)ntrespoff);
+-    memcpy(&ntlmbuf[size], ptr_ntresp, ntresplen);
+-    size += ntresplen;
++  /* ntresplen + size should not be risking an integer overflow here */
++  if(ntresplen + size > sizeof(ntlmbuf)) {
++    failf(data, "incoming NTLM message too big");
++    return CURLE_OUT_OF_MEMORY;
+   }
++  DEBUGASSERT(size == (size_t)ntrespoff);
++  memcpy(&ntlmbuf[size], ptr_ntresp, ntresplen);
++  size += ntresplen;
+ 
+   DEBUG_OUT({
+     fprintf(stderr, "\n   ntresp=");
+-- 
+2.17.2
+
diff --git a/SOURCES/0011-curl-7.61.1-CVE-2019-3823.patch b/SOURCES/0011-curl-7.61.1-CVE-2019-3823.patch
new file mode 100644
index 0000000..d1d259f
--- /dev/null
+++ b/SOURCES/0011-curl-7.61.1-CVE-2019-3823.patch
@@ -0,0 +1,50 @@
+From d26f1025d0a0a6c602d758a2e0917759492473e9 Mon Sep 17 00:00:00 2001
+From: Daniel Gustafsson <daniel@yesql.se>
+Date: Sat, 19 Jan 2019 00:42:47 +0100
+Subject: [PATCH] smtp: avoid risk of buffer overflow in strtol
+
+If the incoming len is 5, but the buffer does not have a termination
+after 5 bytes, the strtol() call may keep reading through the line
+buffer until it exceeds its boundary. Fix by ensuring that we are
+using a bounded read with a temporary buffer on the stack.
+
+Bug: https://curl.haxx.se/docs/CVE-2019-3823.html
+Reported-by: Brian Carpenter (Geeknik Labs)
+CVE-2019-3823
+
+Upstream-commit: 39df4073e5413fcdbb5a38da0c1ce6f1c0ceb484
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/smtp.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/lib/smtp.c b/lib/smtp.c
+index ecf10a4..1b9f92d 100644
+--- a/lib/smtp.c
++++ b/lib/smtp.c
+@@ -5,7 +5,7 @@
+  *                            | (__| |_| |  _ <| |___
+  *                             \___|\___/|_| \_\_____|
+  *
+- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+  *
+  * This software is licensed as described in the file COPYING, which
+  * you should have received as part of this distribution. The terms
+@@ -207,8 +207,12 @@ static bool smtp_endofresp(struct connectdata *conn, char *line, size_t len,
+      Section 4. Examples of RFC-4954 but some e-mail servers ignore this and
+      only send the response code instead as per Section 4.2. */
+   if(line[3] == ' ' || len == 5) {
++    char tmpline[6];
++
+     result = TRUE;
+-    *resp = curlx_sltosi(strtol(line, NULL, 10));
++    memset(tmpline, '\0', sizeof(tmpline));
++    memcpy(tmpline, line, (len == 5 ? 5 : 3));
++    *resp = curlx_sltosi(strtol(tmpline, NULL, 10));
+ 
+     /* Make sure real server never sends internal value */
+     if(*resp == 1)
+-- 
+2.17.2
+
diff --git a/SOURCES/0014-curl-7.61.1-libssh-socket.patch b/SOURCES/0014-curl-7.61.1-libssh-socket.patch
new file mode 100644
index 0000000..83c9cc7
--- /dev/null
+++ b/SOURCES/0014-curl-7.61.1-libssh-socket.patch
@@ -0,0 +1,66 @@
+From 095d4cf3b1c388b2871e3783f8c41b1e01200a25 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Felix=20H=C3=A4dicke?= <felixhaedicke@web.de>
+Date: Wed, 23 Jan 2019 23:47:55 +0100
+Subject: [PATCH] libssh: do not let libssh create socket
+
+By default, libssh creates a new socket, instead of using the socket
+created by curl for SSH connections.
+
+Pass the socket created by curl to libssh using ssh_options_set() with
+SSH_OPTIONS_FD directly after ssh_new(). So libssh uses our socket
+instead of creating a new one.
+
+This approach is very similar to what is done in the libssh2 code, where
+the socket created by curl is passed to libssh2 when
+libssh2_session_startup() is called.
+
+Fixes #3491
+Closes #3495
+
+Upstream-commit: 15c94b310bf9e0c92d71fca5a88eb67a1e2548a6
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/ssh-libssh.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/lib/ssh-libssh.c b/lib/ssh-libssh.c
+index 7d59089..4110be2 100644
+--- a/lib/ssh-libssh.c
++++ b/lib/ssh-libssh.c
+@@ -549,6 +549,7 @@ static CURLcode myssh_statemach_act(struct connectdata *conn, bool *block)
+   struct Curl_easy *data = conn->data;
+   struct SSHPROTO *protop = data->req.protop;
+   struct ssh_conn *sshc = &conn->proto.sshc;
++  curl_socket_t sock = conn->sock[FIRSTSOCKET];
+   int rc = SSH_NO_ERROR, err;
+   char *new_readdir_line;
+   int seekerr = CURL_SEEKFUNC_OK;
+@@ -792,7 +793,7 @@ static CURLcode myssh_statemach_act(struct connectdata *conn, bool *block)
+ 
+       Curl_pgrsTime(conn->data, TIMER_APPCONNECT);      /* SSH is connected */
+ 
+-      conn->sockfd = ssh_get_fd(sshc->ssh_session);
++      conn->sockfd = sock;
+       conn->writesockfd = CURL_SOCKET_BAD;
+ 
+       if(conn->handler->protocol == CURLPROTO_SFTP) {
+@@ -2048,6 +2049,7 @@ static CURLcode myssh_connect(struct connectdata *conn, bool *done)
+ {
+   struct ssh_conn *ssh;
+   CURLcode result;
++  curl_socket_t sock = conn->sock[FIRSTSOCKET];
+   struct Curl_easy *data = conn->data;
+   int rc;
+ 
+@@ -2076,6 +2078,8 @@ static CURLcode myssh_connect(struct connectdata *conn, bool *done)
+     return CURLE_FAILED_INIT;
+   }
+ 
++  ssh_options_set(ssh->ssh_session, SSH_OPTIONS_FD, &sock);
++
+   if(conn->user) {
+     infof(data, "User: %s\n", conn->user);
+     ssh_options_set(ssh->ssh_session, SSH_OPTIONS_USER, conn->user);
+-- 
+2.17.2
+
diff --git a/SOURCES/0017-curl-7.64.0-CVE-2019-5436.patch b/SOURCES/0017-curl-7.64.0-CVE-2019-5436.patch
new file mode 100644
index 0000000..8b0e453
--- /dev/null
+++ b/SOURCES/0017-curl-7.64.0-CVE-2019-5436.patch
@@ -0,0 +1,31 @@
+From 55a27027d5f024a0ecc2c23c81ed99de6192c9f3 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 3 May 2019 22:20:37 +0200
+Subject: [PATCH] tftp: use the current blksize for recvfrom()
+
+bug: https://curl.haxx.se/docs/CVE-2019-5436.html
+Reported-by: l00p3r on hackerone
+CVE-2019-5436
+
+Upstream-commit: 2576003415625d7b5f0e390902f8097830b82275
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/tftp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/tftp.c b/lib/tftp.c
+index 269b3cd..4f2a131 100644
+--- a/lib/tftp.c
++++ b/lib/tftp.c
+@@ -1005,7 +1005,7 @@ static CURLcode tftp_connect(struct connectdata *conn, bool *done)
+   state->sockfd = state->conn->sock[FIRSTSOCKET];
+   state->state = TFTP_STATE_START;
+   state->error = TFTP_ERR_NONE;
+-  state->blksize = TFTP_BLKSIZE_DEFAULT;
++  state->blksize = blksize;
+   state->requested_blksize = blksize;
+ 
+   ((struct sockaddr *)&state->local_addr)->sa_family =
+-- 
+2.20.1
+
diff --git a/SOURCES/0018-curl-7.65.3-CVE-2019-5482.patch b/SOURCES/0018-curl-7.65.3-CVE-2019-5482.patch
new file mode 100644
index 0000000..f3785ec
--- /dev/null
+++ b/SOURCES/0018-curl-7.65.3-CVE-2019-5482.patch
@@ -0,0 +1,158 @@
+From 63f9837b4ccf600da79314e8667f91bda69988fc Mon Sep 17 00:00:00 2001
+From: Thomas Vegas <>
+Date: Sat, 31 Aug 2019 16:59:56 +0200
+Subject: [PATCH 1/2] tftp: return error when packet is too small for options
+
+Upstream-commit: 82f3ba3806a34fe94dcf9e5c9b88deda6679ca1b
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/tftp.c | 53 +++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 33 insertions(+), 20 deletions(-)
+
+diff --git a/lib/tftp.c b/lib/tftp.c
+index 289cda2..4532170 100644
+--- a/lib/tftp.c
++++ b/lib/tftp.c
+@@ -404,13 +404,14 @@ static CURLcode tftp_parse_option_ack(tftp_state_data_t *state,
+   return CURLE_OK;
+ }
+ 
+-static size_t tftp_option_add(tftp_state_data_t *state, size_t csize,
+-                              char *buf, const char *option)
++static CURLcode tftp_option_add(tftp_state_data_t *state, size_t *csize,
++                                char *buf, const char *option)
+ {
+-  if(( strlen(option) + csize + 1) > (size_t)state->blksize)
+-    return 0;
++  if(( strlen(option) + *csize + 1) > (size_t)state->blksize)
++    return CURLE_TFTP_ILLEGAL;
+   strcpy(buf, option);
+-  return strlen(option) + 1;
++  *csize += strlen(option) + 1;
++  return CURLE_OK;
+ }
+ 
+ static CURLcode tftp_connect_for_tx(tftp_state_data_t *state,
+@@ -511,26 +512,38 @@ static CURLcode tftp_send_first(tftp_state_data_t *state, tftp_event_t event)
+       else
+         strcpy(buf, "0"); /* the destination is large enough */
+ 
+-      sbytes += tftp_option_add(state, sbytes,
+-                                (char *)state->spacket.data + sbytes,
+-                                TFTP_OPTION_TSIZE);
+-      sbytes += tftp_option_add(state, sbytes,
+-                                (char *)state->spacket.data + sbytes, buf);
++      result = tftp_option_add(state, &sbytes,
++                               (char *)state->spacket.data + sbytes,
++                               TFTP_OPTION_TSIZE);
++      if(result == CURLE_OK)
++        result = tftp_option_add(state, &sbytes,
++                                 (char *)state->spacket.data + sbytes, buf);
++
+       /* add blksize option */
+       snprintf(buf, sizeof(buf), "%d", state->requested_blksize);
+-      sbytes += tftp_option_add(state, sbytes,
+-                                (char *)state->spacket.data + sbytes,
+-                                TFTP_OPTION_BLKSIZE);
+-      sbytes += tftp_option_add(state, sbytes,
+-                                (char *)state->spacket.data + sbytes, buf);
++      if(result == CURLE_OK)
++        result = tftp_option_add(state, &sbytes,
++                                 (char *)state->spacket.data + sbytes,
++                                 TFTP_OPTION_BLKSIZE);
++      if(result == CURLE_OK)
++        result = tftp_option_add(state, &sbytes,
++                                 (char *)state->spacket.data + sbytes, buf);
+ 
+       /* add timeout option */
+       snprintf(buf, sizeof(buf), "%d", state->retry_time);
+-      sbytes += tftp_option_add(state, sbytes,
+-                                (char *)state->spacket.data + sbytes,
+-                                TFTP_OPTION_INTERVAL);
+-      sbytes += tftp_option_add(state, sbytes,
+-                                (char *)state->spacket.data + sbytes, buf);
++      if(result == CURLE_OK)
++        result = tftp_option_add(state, &sbytes,
++                                 (char *)state->spacket.data + sbytes,
++                                 TFTP_OPTION_INTERVAL);
++      if(result == CURLE_OK)
++        result = tftp_option_add(state, &sbytes,
++                                 (char *)state->spacket.data + sbytes, buf);
++
++      if(result != CURLE_OK) {
++        failf(data, "TFTP buffer too small for options");
++        free(filename);
++        return CURLE_TFTP_ILLEGAL;
++      }
+     }
+ 
+     /* the typecase for the 3rd argument is mostly for systems that do
+-- 
+2.20.1
+
+
+From b6b12a4cfe00c4850a1d6cee4cf267f00dee5987 Mon Sep 17 00:00:00 2001
+From: Thomas Vegas <>
+Date: Sat, 31 Aug 2019 17:30:51 +0200
+Subject: [PATCH 2/2] tftp: Alloc maximum blksize, and use default unless OACK
+ is received
+
+Fixes potential buffer overflow from 'recvfrom()', should the server
+return an OACK without blksize.
+
+Bug: https://curl.haxx.se/docs/CVE-2019-5482.html
+CVE-2019-5482
+
+Upstream-commit: facb0e4662415b5f28163e853dc6742ac5fafb3d
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/tftp.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/lib/tftp.c b/lib/tftp.c
+index 4532170..5651b62 100644
+--- a/lib/tftp.c
++++ b/lib/tftp.c
+@@ -982,6 +982,7 @@ static CURLcode tftp_connect(struct connectdata *conn, bool *done)
+ {
+   tftp_state_data_t *state;
+   int blksize;
++  int need_blksize;
+ 
+   blksize = TFTP_BLKSIZE_DEFAULT;
+ 
+@@ -996,15 +997,20 @@ static CURLcode tftp_connect(struct connectdata *conn, bool *done)
+       return CURLE_TFTP_ILLEGAL;
+   }
+ 
++  need_blksize = blksize;
++  /* default size is the fallback when no OACK is received */
++  if(need_blksize < TFTP_BLKSIZE_DEFAULT)
++    need_blksize = TFTP_BLKSIZE_DEFAULT;
++
+   if(!state->rpacket.data) {
+-    state->rpacket.data = calloc(1, blksize + 2 + 2);
++    state->rpacket.data = calloc(1, need_blksize + 2 + 2);
+ 
+     if(!state->rpacket.data)
+       return CURLE_OUT_OF_MEMORY;
+   }
+ 
+   if(!state->spacket.data) {
+-    state->spacket.data = calloc(1, blksize + 2 + 2);
++    state->spacket.data = calloc(1, need_blksize + 2 + 2);
+ 
+     if(!state->spacket.data)
+       return CURLE_OUT_OF_MEMORY;
+@@ -1018,7 +1024,7 @@ static CURLcode tftp_connect(struct connectdata *conn, bool *done)
+   state->sockfd = state->conn->sock[FIRSTSOCKET];
+   state->state = TFTP_STATE_START;
+   state->error = TFTP_ERR_NONE;
+-  state->blksize = blksize;
++  state->blksize = TFTP_BLKSIZE_DEFAULT; /* Unless updated by OACK response */
+   state->requested_blksize = blksize;
+ 
+   ((struct sockaddr *)&state->local_addr)->sa_family =
+-- 
+2.20.1
+
diff --git a/SOURCES/0019-curl-7.65.3-CVE-2019-5481.patch b/SOURCES/0019-curl-7.65.3-CVE-2019-5481.patch
new file mode 100644
index 0000000..2cd79df
--- /dev/null
+++ b/SOURCES/0019-curl-7.65.3-CVE-2019-5481.patch
@@ -0,0 +1,46 @@
+From 13de299b112a59c373b330f0539166ecc9a7627b Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 3 Sep 2019 22:59:32 +0200
+Subject: [PATCH] security:read_data fix bad realloc()
+
+... that could end up a double-free
+
+CVE-2019-5481
+Bug: https://curl.haxx.se/docs/CVE-2019-5481.html
+
+Upstream-commit: 9069838b30fb3b48af0123e39f664cea683254a5
+Signed-off-by: Kamil Dudka <kdudka@redhat.com>
+---
+ lib/security.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/lib/security.c b/lib/security.c
+index 550ea2d..c5e4e13 100644
+--- a/lib/security.c
++++ b/lib/security.c
+@@ -191,7 +191,6 @@ static CURLcode read_data(struct connectdata *conn,
+                           struct krb5buffer *buf)
+ {
+   int len;
+-  void *tmp = NULL;
+   CURLcode result;
+ 
+   result = socket_read(fd, &len, sizeof(len));
+@@ -201,12 +200,11 @@ static CURLcode read_data(struct connectdata *conn,
+   if(len) {
+     /* only realloc if there was a length */
+     len = ntohl(len);
+-    tmp = Curl_saferealloc(buf->data, len);
++    buf->data = Curl_saferealloc(buf->data, len);
+   }
+-  if(tmp == NULL)
++  if(!len || !buf->data)
+     return CURLE_OUT_OF_MEMORY;
+ 
+-  buf->data = tmp;
+   result = socket_read(fd, buf->data, len);
+   if(result)
+     return result;
+-- 
+2.20.1
+
diff --git a/SOURCES/0101-curl-7.32.0-multilib.patch b/SOURCES/0101-curl-7.32.0-multilib.patch
new file mode 100644
index 0000000..532980e
--- /dev/null
+++ b/SOURCES/0101-curl-7.32.0-multilib.patch
@@ -0,0 +1,89 @@
+From 2a4754a3a7cf60ecc36d83cbe50b8c337cb87632 Mon Sep 17 00:00:00 2001
+From: Kamil Dudka <kdudka@redhat.com>
+Date: Fri, 12 Apr 2013 12:04:05 +0200
+Subject: [PATCH] prevent multilib conflicts on the curl-config script
+
+---
+ curl-config.in     |   21 +++------------------
+ docs/curl-config.1 |    4 +++-
+ libcurl.pc.in      |    1 +
+ 3 files changed, 7 insertions(+), 19 deletions(-)
+
+diff --git a/curl-config.in b/curl-config.in
+index 150004d..95d0759 100644
+--- a/curl-config.in
++++ b/curl-config.in
+@@ -76,7 +76,7 @@ while test $# -gt 0; do
+         ;;
+ 
+     --cc)
+-        echo "@CC@"
++        echo "gcc"
+         ;;
+ 
+     --prefix)
+@@ -143,32 +143,17 @@ while test $# -gt 0; do
+         ;;
+ 
+     --libs)
+-        if test "X@libdir@" != "X/usr/lib" -a "X@libdir@" != "X/usr/lib64"; then
+-           CURLLIBDIR="-L@libdir@ "
+-        else
+-           CURLLIBDIR=""
+-        fi
+-        if test "X@REQUIRE_LIB_DEPS@" = "Xyes"; then
+-          echo ${CURLLIBDIR}-lcurl @LIBCURL_LIBS@
+-        else
+-          echo ${CURLLIBDIR}-lcurl
+-        fi
++        echo -lcurl
+         ;;
+     --ssl-backends)
+         echo "@SSL_BACKENDS@"
+         ;;
+ 
+     --static-libs)
+-        if test "X@ENABLE_STATIC@" != "Xno" ; then
+-          echo @libdir@/libcurl.@libext@ @LDFLAGS@ @LIBCURL_LIBS@
+-        else
+-          echo "curl was built with static libraries disabled" >&2
+-          exit 1
+-        fi
+         ;;
+ 
+     --configure)
+-        echo @CONFIGURE_OPTIONS@
++        pkg-config libcurl --variable=configure_options | sed 's/^"//;s/"$//'
+         ;;
+ 
+     *)
+diff --git a/docs/curl-config.1 b/docs/curl-config.1
+index 14a9d2b..ffcc004 100644
+--- a/docs/curl-config.1
++++ b/docs/curl-config.1
+@@ -70,7 +70,9 @@ no, one or several names. If more than one name, they will appear
+ comma-separated. (Added in 7.58.0)
+ .IP "--static-libs"
+ Shows the complete set of libs and other linker options you will need in order
+-to link your application with libcurl statically. (Added in 7.17.1)
++to link your application with libcurl statically. Note that Fedora/RHEL libcurl
++packages do not provide any static libraries, thus cannot be linked statically.
++(Added in 7.17.1)
+ .IP "--version"
+ Outputs version information about the installed libcurl.
+ .IP "--vernum"
+diff --git a/libcurl.pc.in b/libcurl.pc.in
+index 2ba9c39..f8f8b00 100644
+--- a/libcurl.pc.in
++++ b/libcurl.pc.in
+@@ -29,6 +29,7 @@ libdir=@libdir@
+ includedir=@includedir@
+ supported_protocols="@SUPPORT_PROTOCOLS@"
+ supported_features="@SUPPORT_FEATURES@"
++configure_options=@CONFIGURE_OPTIONS@
+ 
+ Name: libcurl
+ URL: https://curl.haxx.se/
+-- 
+2.5.0
+
diff --git a/SOURCES/0102-curl-7.36.0-debug.patch b/SOURCES/0102-curl-7.36.0-debug.patch
new file mode 100644
index 0000000..bbb253f
--- /dev/null
+++ b/SOURCES/0102-curl-7.36.0-debug.patch
@@ -0,0 +1,65 @@
+From 6710648c2b270c9ce68a7d9f1bba1222c7be8b58 Mon Sep 17 00:00:00 2001
+From: Kamil Dudka <kdudka@redhat.com>
+Date: Wed, 31 Oct 2012 11:38:30 +0100
+Subject: [PATCH] prevent configure script from discarding -g in CFLAGS (#496778)
+
+---
+ configure            |   13 +++----------
+ m4/curl-compilers.m4 |   13 +++----------
+ 2 files changed, 6 insertions(+), 20 deletions(-)
+
+diff --git a/configure b/configure
+index 8f079a3..53b4774 100755
+--- a/configure
++++ b/configure
+@@ -16414,18 +16414,11 @@ $as_echo "yes" >&6; }
+     gccvhi=`echo $gccver | cut -d . -f1`
+     gccvlo=`echo $gccver | cut -d . -f2`
+     compiler_num=`(expr $gccvhi "*" 100 + $gccvlo) 2>/dev/null`
+-    flags_dbg_all="-g -g0 -g1 -g2 -g3"
+-    flags_dbg_all="$flags_dbg_all -ggdb"
+-    flags_dbg_all="$flags_dbg_all -gstabs"
+-    flags_dbg_all="$flags_dbg_all -gstabs+"
+-    flags_dbg_all="$flags_dbg_all -gcoff"
+-    flags_dbg_all="$flags_dbg_all -gxcoff"
+-    flags_dbg_all="$flags_dbg_all -gdwarf-2"
+-    flags_dbg_all="$flags_dbg_all -gvms"
++    flags_dbg_all=""
+     flags_dbg_yes="-g"
+     flags_dbg_off=""
+-    flags_opt_all="-O -O0 -O1 -O2 -O3 -Os -Og -Ofast"
+-    flags_opt_yes="-O2"
++    flags_opt_all=""
++    flags_opt_yes=""
+     flags_opt_off="-O0"
+ 
+     OLDCPPFLAGS=$CPPFLAGS
+diff --git a/m4/curl-compilers.m4 b/m4/curl-compilers.m4
+index 0cbba7a..9175b5b 100644
+--- a/m4/curl-compilers.m4
++++ b/m4/curl-compilers.m4
+@@ -157,18 +157,11 @@ AC_DEFUN([CURL_CHECK_COMPILER_GNU_C], [
+     gccvhi=`echo $gccver | cut -d . -f1`
+     gccvlo=`echo $gccver | cut -d . -f2`
+     compiler_num=`(expr $gccvhi "*" 100 + $gccvlo) 2>/dev/null`
+-    flags_dbg_all="-g -g0 -g1 -g2 -g3"
+-    flags_dbg_all="$flags_dbg_all -ggdb"
+-    flags_dbg_all="$flags_dbg_all -gstabs"
+-    flags_dbg_all="$flags_dbg_all -gstabs+"
+-    flags_dbg_all="$flags_dbg_all -gcoff"
+-    flags_dbg_all="$flags_dbg_all -gxcoff"
+-    flags_dbg_all="$flags_dbg_all -gdwarf-2"
+-    flags_dbg_all="$flags_dbg_all -gvms"
++    flags_dbg_all=""
+     flags_dbg_yes="-g"
+     flags_dbg_off=""
+-    flags_opt_all="-O -O0 -O1 -O2 -O3 -Os -Og -Ofast"
+-    flags_opt_yes="-O2"
++    flags_opt_all=""
++    flags_opt_yes=""
+     flags_opt_off="-O0"
+     CURL_CHECK_DEF([_WIN32], [], [silent])
+   else
+-- 
+1.7.1
+
diff --git a/SOURCES/0103-curl-7.59.0-python3.patch b/SOURCES/0103-curl-7.59.0-python3.patch
new file mode 100644
index 0000000..f66b6c0
--- /dev/null
+++ b/SOURCES/0103-curl-7.59.0-python3.patch
@@ -0,0 +1,140 @@
+From bdba7b54224814055185513de1e7ff6619031553 Mon Sep 17 00:00:00 2001
+From: Kamil Dudka <kdudka@redhat.com>
+Date: Thu, 15 Mar 2018 13:21:40 +0100
+Subject: [PATCH 1/2] tests/http_pipe.py: migrate to Python 3
+
+---
+ tests/http_pipe.py | 4 ++--
+ tests/runtests.pl  | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tests/http_pipe.py b/tests/http_pipe.py
+index bc32173..75ac165 100755
+--- a/tests/http_pipe.py
++++ b/tests/http_pipe.py
+@@ -383,13 +383,13 @@ class PipelineRequestHandler(socketserver.BaseRequestHandler):
+           self.request.setblocking(True)
+           if not new_data:
+             return
+-          new_requests = self._request_parser.ParseAdditionalData(new_data)
++          new_requests = self._request_parser.ParseAdditionalData(new_data.decode('utf8'))
+           self._response_builder.QueueRequests(
+               new_requests, self._request_parser.were_all_requests_http_1_1)
+           self._num_queued += len(new_requests)
+           self._last_queued_time = time.time()
+         elif fileno in wlist:
+-          num_bytes_sent = self.request.send(self._send_buffer[0:4096])
++          num_bytes_sent = self.request.send(self._send_buffer[0:4096].encode('utf8'))
+           self._send_buffer = self._send_buffer[num_bytes_sent:]
+           time.sleep(0.05)
+ 
+diff --git a/tests/runtests.pl b/tests/runtests.pl
+index d6aa5ca..4d395ef 100755
+--- a/tests/runtests.pl
++++ b/tests/runtests.pl
+@@ -1439,7 +1439,7 @@ sub runhttpserver {
+     elsif($alt eq "pipe") {
+         # basically the same, but another ID
+         $idnum = 3;
+-        $exe = "python $srcdir/http_pipe.py";
++        $exe = "python3 $srcdir/http_pipe.py";
+         $verbose_flag .= "1 ";
+     }
+     elsif($alt eq "unix") {
+-- 
+2.14.3
+
+
+From 3c4c7340e455b7256c0786759422f34ec3e2d440 Mon Sep 17 00:00:00 2001
+From: Kamil Dudka <kdudka@redhat.com>
+Date: Thu, 15 Mar 2018 14:49:56 +0100
+Subject: [PATCH 2/2] tests/{negtelnet,smb}server.py: migrate to Python 3
+
+Unfortunately, smbserver.py does not work with Python 3 because
+there is no 'impacket' module available for Python 3:
+
+https://github.com/CoreSecurity/impacket/issues/61
+---
+ tests/negtelnetserver.py | 12 ++++++------
+ tests/smbserver.py       |  4 ++--
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/tests/negtelnetserver.py b/tests/negtelnetserver.py
+index 8cfd409..72ee771 100755
+--- a/tests/negtelnetserver.py
++++ b/tests/negtelnetserver.py
+@@ -23,7 +23,7 @@ IDENT = "NTEL"
+ 
+ # The strings that indicate the test framework is checking our aliveness
+ VERIFIED_REQ = b"verifiedserver"
+-VERIFIED_RSP = b"WE ROOLZ: {pid}"
++VERIFIED_RSP = "WE ROOLZ: {pid}"
+ 
+ 
+ def telnetserver(options):
+@@ -34,7 +34,7 @@ def telnetserver(options):
+     if options.pidfile:
+         pid = os.getpid()
+         with open(options.pidfile, "w") as f:
+-            f.write(b"{0}".format(pid))
++            f.write("{0}".format(pid))
+ 
+     local_bind = (HOST, options.port)
+     log.info("Listening on %s", local_bind)
+@@ -73,11 +73,11 @@ class NegotiatingTelnetHandler(socketserver.BaseRequestHandler):
+                 response_data = VERIFIED_RSP.format(pid=os.getpid())
+             else:
+                 log.debug("Received normal request - echoing back")
+-                response_data = data.strip()
++                response_data = data.decode('utf8').strip()
+ 
+             if response_data:
+                 log.debug("Sending %r", response_data)
+-                self.request.sendall(response_data)
++                self.request.sendall(response_data.encode('utf8'))
+ 
+         except IOError:
+             log.exception("IOError hit during request")
+@@ -132,7 +132,7 @@ class Negotiator(object):
+         return buffer
+ 
+     def byte_to_int(self, byte):
+-        return struct.unpack(b'B', byte)[0]
++        return int(byte)
+ 
+     def no_neg(self, byte, byte_int, buffer):
+         # Not negotiating anything thus far. Check to see if we
+@@ -197,7 +197,7 @@ class Negotiator(object):
+         self.tcp.sendall(packed_message)
+ 
+     def pack(self, arr):
+-        return struct.pack(b'{0}B'.format(len(arr)), *arr)
++        return struct.pack('{0}B'.format(len(arr)), *arr)
+ 
+     def send_iac(self, arr):
+         message = [NegTokens.IAC]
+diff --git a/tests/smbserver.py b/tests/smbserver.py
+index 195ae39..b09cd44 100755
+--- a/tests/smbserver.py
++++ b/tests/smbserver.py
+@@ -24,7 +24,7 @@
+ from __future__ import (absolute_import, division, print_function)
+ # unicode_literals)
+ import argparse
+-import ConfigParser
++import configparser
+ import os
+ import sys
+ import logging
+@@ -58,7 +58,7 @@ def smbserver(options):
+             f.write("{0}".format(pid))
+ 
+     # Here we write a mini config for the server
+-    smb_config = ConfigParser.ConfigParser()
++    smb_config = configparser.ConfigParser()
+     smb_config.add_section("global")
+     smb_config.set("global", "server_name", "SERVICE")
+     smb_config.set("global", "server_os", "UNIX")
+-- 
+2.14.3
+
diff --git a/SOURCES/0104-curl-7.19.7-localhost6.patch b/SOURCES/0104-curl-7.19.7-localhost6.patch
new file mode 100644
index 0000000..4f664d3
--- /dev/null
+++ b/SOURCES/0104-curl-7.19.7-localhost6.patch
@@ -0,0 +1,51 @@
+diff --git a/tests/data/test1083 b/tests/data/test1083
+index e441278..b0958b6 100644
+--- a/tests/data/test1083
++++ b/tests/data/test1083
+@@ -33,13 +33,13 @@ ipv6
+ http-ipv6
+ </server>
+  <name>
+-HTTP-IPv6 GET with ip6-localhost --interface
++HTTP-IPv6 GET with localhost6 --interface
+  </name>
+  <command>
+--g "http://%HOST6IP:%HTTP6PORT/1083" --interface ip6-localhost
++-g "http://%HOST6IP:%HTTP6PORT/1083" --interface localhost6
+ </command>
+ <precheck>
+-perl -e "if ('%CLIENT6IP' ne '[::1]') {print 'Test requires default test server host address';} else {exec './server/resolve --ipv6 ip6-localhost'; print 'Cannot run precheck resolve';}"
++perl -e "if ('%CLIENT6IP' ne '[::1]') {print 'Test requires default test server host address';} else {exec './server/resolve --ipv6 localhost6'; print 'Cannot run precheck resolve';}"
+ </precheck>
+ </client>
+ 
+diff --git a/tests/data/test241 b/tests/data/test241
+index 46eae1f..4e1632c 100644
+--- a/tests/data/test241
++++ b/tests/data/test241
+@@ -30,13 +30,13 @@ ipv6
+ http-ipv6
+ </server>
+  <name>
+-HTTP-IPv6 GET (using ip6-localhost)
++HTTP-IPv6 GET (using localhost6)
+  </name>
+  <command>
+--g "http://ip6-localhost:%HTTP6PORT/241"
++-g "http://localhost6:%HTTP6PORT/241"
+ </command>
+ <precheck>
+-./server/resolve --ipv6 ip6-localhost
++./server/resolve --ipv6 localhost6
+ </precheck>
+ </client>
+ 
+@@ -48,7 +48,7 @@ HTTP-IPv6 GET (using ip6-localhost)
+ </strip>
+ <protocol>
+ GET /241 HTTP/1.1
+-Host: ip6-localhost:%HTTP6PORT
++Host: localhost6:%HTTP6PORT
+ Accept: */*
+ 
+ </protocol>
diff --git a/SPECS/curl.spec b/SPECS/curl.spec
new file mode 100644
index 0000000..707664f
--- /dev/null
+++ b/SPECS/curl.spec
@@ -0,0 +1,1751 @@
+Summary: A utility for getting files from remote servers (FTP, HTTP, and others)
+Name: curl
+Version: 7.61.1
+Release: 12%{?dist}
+License: MIT
+Source: https://curl.haxx.se/download/%{name}-%{version}.tar.xz
+
+# test320: update expected output for gnutls-3.6.4
+Patch1:   0001-curl-7.61.1-test320-gnutls.patch
+
+# update the documentation of --tlsv1.0 in curl(1) man page (#1620217)
+Patch2:   0002-curl-7.61.1-tlsv1.0-man.patch
+
+# enable TLS 1.3 post-handshake auth in OpenSSL (#1636900)
+Patch3:   0003-curl-7.61.1-TLS-1.3-PHA.patch
+
+# fix bad arithmetic when outputting warnings to stderr (CVE-2018-16842)
+Patch4:   0004-curl-7.61.1-CVE-2018-16842.patch
+# we need `git apply` to apply this patch
+BuildRequires: git
+
+# fix use-after-free in handle close (CVE-2018-16840)
+Patch5:   0005-curl-7.61.1-CVE-2018-16840.patch
+
+# SASL password overflow via integer overflow (CVE-2018-16839)
+Patch6:   0006-curl-7.61.1-CVE-2018-16839.patch
+
+# curl -J: do not append to the destination file (#1660827)
+Patch7:   0007-curl-7.63.0-JO-preserve-local-file.patch
+
+# xattr: strip credentials from any URL that is stored (CVE-2018-20483)
+Patch8:   0008-curl-7.61.1-CVE-2018-20483.patch
+
+# fix NTLM type-2 out-of-bounds buffer read (CVE-2018-16890)
+Patch9:   0009-curl-7.61.1-CVE-2018-16890.patch
+
+# fix NTLMv2 type-3 header stack buffer overflow (CVE-2019-3822)
+Patch10:  0010-curl-7.61.1-CVE-2019-3822.patch
+
+# fix SMTP end-of-response out-of-bounds read (CVE-2019-3823)
+Patch11:  0011-curl-7.61.1-CVE-2019-3823.patch
+
+# do not let libssh create a new socket for SCP/SFTP (#1669156)
+Patch14:  0014-curl-7.61.1-libssh-socket.patch
+
+# fix TFTP receive buffer overflow (CVE-2019-5436)
+Patch17:  0017-curl-7.64.0-CVE-2019-5436.patch
+
+# fix heap buffer overflow in function tftp_receive_packet() (CVE-2019-5482)
+Patch18:  0018-curl-7.65.3-CVE-2019-5482.patch
+
+# double free due to subsequent call of realloc() (CVE-2019-5481)
+Patch19:  0019-curl-7.65.3-CVE-2019-5481.patch
+
+# patch making libcurl multilib ready
+Patch101: 0101-curl-7.32.0-multilib.patch
+
+# prevent configure script from discarding -g in CFLAGS (#496778)
+Patch102: 0102-curl-7.36.0-debug.patch
+
+# migrate tests/http_pipe.py to Python 3
+Patch103: 0103-curl-7.59.0-python3.patch
+
+# use localhost6 instead of ip6-localhost in the curl test-suite
+Patch104: 0104-curl-7.19.7-localhost6.patch
+
+Provides: curl-full = %{version}-%{release}
+Provides: webclient
+URL: https://curl.haxx.se/
+BuildRequires: automake
+BuildRequires: brotli-devel
+BuildRequires: coreutils
+BuildRequires: gcc
+BuildRequires: groff
+BuildRequires: krb5-devel
+BuildRequires: libidn2-devel
+BuildRequires: libmetalink-devel
+BuildRequires: libnghttp2-devel
+BuildRequires: libpsl-devel
+BuildRequires: libssh-devel
+BuildRequires: make
+BuildRequires: openldap-devel
+BuildRequires: openssh-clients
+BuildRequires: openssh-server
+BuildRequires: openssl-devel
+BuildRequires: pkgconfig
+BuildRequires: python3-devel
+BuildRequires: sed
+BuildRequires: stunnel
+BuildRequires: zlib-devel
+
+# needed to compress content of tool_hugehelp.c after changing curl.1 man page
+BuildRequires: perl(IO::Compress::Gzip)
+
+# gnutls-serv is used by the upstream test-suite
+BuildRequires: gnutls-utils
+
+# nghttpx (an HTTP/2 proxy) is used by the upstream test-suite
+BuildRequires: nghttp2
+
+# perl modules used in the test suite
+BuildRequires: perl(Cwd)
+BuildRequires: perl(Digest::MD5)
+BuildRequires: perl(Exporter)
+BuildRequires: perl(File::Basename)
+BuildRequires: perl(File::Copy)
+BuildRequires: perl(File::Spec)
+BuildRequires: perl(IPC::Open2)
+BuildRequires: perl(MIME::Base64)
+BuildRequires: perl(strict)
+BuildRequires: perl(Time::Local)
+BuildRequires: perl(Time::HiRes)
+BuildRequires: perl(warnings)
+BuildRequires: perl(vars)
+
+# The test-suite runs automatically through valgrind if valgrind is available
+# on the system.  By not installing valgrind into mock's chroot, we disable
+# this feature for production builds on architectures where valgrind is known
+# to be less reliable, in order to avoid unnecessary build failures (see RHBZ
+# #810992, #816175, and #886891).  Nevertheless developers are free to install
+# valgrind manually to improve test coverage on any architecture.
+%ifarch x86_64 %{ix86}
+BuildRequires: valgrind
+%endif
+
+# using an older version of libcurl could result in CURLE_UNKNOWN_OPTION
+Requires: libcurl%{?_isa} >= %{version}-%{release}
+
+# require at least the version of libpsl that we were built against,
+# to ensure that we have the necessary symbols available (#1631804)
+%global libpsl_version %(pkg-config --modversion libpsl 2>/dev/null || echo 0)
+
+# require at least the version of libssh that we were built against,
+# to ensure that we have the necessary symbols available (#525002, #642796)
+%global libssh_version %(pkg-config --modversion libssh 2>/dev/null || echo 0)
+
+# require at least the version of openssl-libs that we were built against,
+# to ensure that we have the necessary symbols available (#1462184, #1462211)
+%global openssl_version %(pkg-config --modversion openssl 2>/dev/null || echo 0)
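+
+# (illustrative note, not a build directive) each %%global above captures the
+# version pkg-config reports in the buildroot at build time; e.g. with a
+# hypothetical libssh 0.9.0 installed, %%{libssh_version} expands to 0.9.0 and
+# the "Requires: libssh%%{?_isa} >= %%{libssh_version}" in the libcurl
+# subpackage below resolves to "Requires: libssh(x86-64) >= 0.9.0" on x86_64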
+
+%description
+curl is a command line tool for transferring data with URL syntax, supporting
+FTP, FTPS, HTTP, HTTPS, SCP, SFTP, TFTP, TELNET, DICT, LDAP, LDAPS, FILE, IMAP,
+SMTP, POP3 and RTSP.  curl supports SSL certificates, HTTP POST, HTTP PUT, FTP
+uploading, HTTP form based upload, proxies, cookies, user+password
+authentication (Basic, Digest, NTLM, Negotiate, kerberos...), file transfer
+resume, proxy tunneling and a busload of other useful tricks. 
+
+%package -n libcurl
+Summary: A library for getting files from web servers
+Requires: libpsl%{?_isa} >= %{libpsl_version}
+Requires: libssh%{?_isa} >= %{libssh_version}
+Requires: openssl-libs%{?_isa} >= 1:%{openssl_version}
+Provides: libcurl-full = %{version}-%{release}
+Provides: libcurl-full%{?_isa} = %{version}-%{release}
+
+%description -n libcurl
+libcurl is a free and easy-to-use client-side URL transfer library, supporting
+FTP, FTPS, HTTP, HTTPS, SCP, SFTP, TFTP, TELNET, DICT, LDAP, LDAPS, FILE, IMAP,
+SMTP, POP3 and RTSP. libcurl supports SSL certificates, HTTP POST, HTTP PUT,
+FTP uploading, HTTP form based upload, proxies, cookies, user+password
+authentication (Basic, Digest, NTLM, Negotiate, Kerberos4), file transfer
+resume, http proxy tunneling and more.
+
+%package -n libcurl-devel
+Summary: Files needed for building applications with libcurl
+Requires: libcurl%{?_isa} = %{version}-%{release}
+
+Provides: curl-devel = %{version}-%{release}
+Provides: curl-devel%{?_isa} = %{version}-%{release}
+Obsoletes: curl-devel < %{version}-%{release}
+
+%description -n libcurl-devel
+The libcurl-devel package includes header files and libraries necessary for
+developing programs which use the libcurl library. It contains the API
+documentation of the library, too.
+
+%package -n curl-minimal
+Summary: Conservatively configured build of curl for minimal installations
+Provides: curl = %{version}-%{release}
+Conflicts: curl
+RemovePathPostfixes: .minimal
+
+# using an older version of libcurl could result in CURLE_UNKNOWN_OPTION
+Requires: libcurl%{?_isa} >= %{version}-%{release}
+
+%description -n curl-minimal
+This is a replacement of the 'curl' package for minimal installations.  It
+comes with a limited set of features compared to the 'curl' package.  On the
+other hand, the package is smaller and requires fewer run-time dependencies to
+be installed.
+
+%package -n libcurl-minimal
+Summary: Conservatively configured build of libcurl for minimal installations
+Requires: openssl-libs%{?_isa} >= 1:%{openssl_version}
+Provides: libcurl = %{version}-%{release}
+Provides: libcurl%{?_isa} = %{version}-%{release}
+Conflicts: libcurl
+RemovePathPostfixes: .minimal
+# needed for RemovePathPostfixes to work with shared libraries
+%undefine __brp_ldconfig
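+# (illustrative note) RemovePathPostfixes strips the ".minimal" suffix when the
+# package payload is assembled, so a file installed into the buildroot as e.g.
+# %%{_libdir}/libcurl.so.4.minimal is shipped and laid down on disk as
+# %%{_libdir}/libcurl.so.4; the -minimal packages thus install the same paths
+# as the full packages they replace (and therefore conflict with them)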
+
+%description -n libcurl-minimal
+This is a replacement of the 'libcurl' package for minimal installations.  It
+comes with a limited set of features compared to the 'libcurl' package.  On the
+other hand, the package is smaller and requires fewer run-time dependencies to
+be installed.
+
+%prep
+%setup -q
+
+# upstream patches
+%patch1 -p1
+%patch2 -p1
+%patch3 -p1
+git init
+git apply %{PATCH4}
+%patch5 -p1
+%patch6 -p1
+%patch7 -p1
+%patch8 -p1
+%patch9 -p1
+%patch10 -p1
+%patch11 -p1
+%patch14 -p1
+
+# Fedora patches
+%patch101 -p1
+%patch102 -p1
+%patch103 -p1
+%patch104 -p1
+
+# upstream patches
+%patch17 -p1
+%patch18 -p1
+%patch19 -p1
+
+# make tests/*.py use Python 3
+sed -e '1 s|^#!/.*python|#!%{__python3}|' -i tests/*.py
+
+# regenerate Makefile.in files
+aclocal -I m4
+automake
+
+# disable test 1112 (#565305), test 1455 (occasionally fails with 'bind failed
+# with errno 98: Address already in use' in Koji environment), and test 1801
+# <https://github.com/bagder/curl/commit/21e82bd6#commitcomment-12226582>
+# and test 1900, which is flaky and covers a deprecated feature of libcurl
+# <https://github.com/curl/curl/pull/2705>
+printf "1112\n1455\n1801\n1900\n" >> tests/data/DISABLED
+
+# disable test 1319 on ppc64 (server times out)
+%ifarch ppc64
+echo "1319" >> tests/data/DISABLED
+%endif
+
+# temporarily disable test 582 on s390x (client times out)
+%ifarch s390x
+echo "582" >> tests/data/DISABLED
+%endif
+
+# adapt test 323 for updated OpenSSL
+sed -e 's/^35$/35,52/' -i tests/data/test323
+
+%build
+mkdir build-{full,minimal}
+export common_configure_opts=" \
+    --cache-file=../config.cache \
+    --disable-static \
+    --enable-symbol-hiding \
+    --enable-ipv6 \
+    --enable-threaded-resolver \
+    --with-gssapi \
+    --with-nghttp2 \
+    --with-ssl --with-ca-bundle=%{_sysconfdir}/pki/tls/certs/ca-bundle.crt"
+
+%global _configure ../configure
+
+# configure minimal build
+(
+    cd build-minimal
+    %configure $common_configure_opts \
+        --disable-ldap \
+        --disable-ldaps \
+        --disable-manual \
+        --without-brotli \
+        --without-libidn2 \
+        --without-libmetalink \
+        --without-libpsl \
+        --without-libssh
+)
+
+# configure full build
+(
+    cd build-full
+    %configure $common_configure_opts \
+        --enable-ldap \
+        --enable-ldaps \
+        --enable-manual \
+        --with-brotli \
+        --with-libidn2 \
+        --with-libmetalink \
+        --with-libpsl \
+        --with-libssh
+)
+
+# avoid using rpath
+sed -e 's/^runpath_var=.*/runpath_var=/' \
+    -e 's/^hardcode_libdir_flag_spec=".*"$/hardcode_libdir_flag_spec=""/' \
+    -i build-{full,minimal}/libtool
+
+make %{?_smp_mflags} V=1 -C build-minimal
+make %{?_smp_mflags} V=1 -C build-full
+
+%check
+# we have to override LD_LIBRARY_PATH because we eliminated rpath
+LD_LIBRARY_PATH="$RPM_BUILD_ROOT%{_libdir}:$LD_LIBRARY_PATH"
+export LD_LIBRARY_PATH
+
+# compile upstream test-cases
+cd build-full/tests
+make %{?_smp_mflags} V=1
+
+# relax crypto policy for the test-suite to make it pass again (#1611712)
+export OPENSSL_SYSTEM_CIPHERS_OVERRIDE=XXX
+export OPENSSL_CONF=
+
+# run the upstream test-suite
+srcdir=../../tests perl -I../../tests ../../tests/runtests.pl -a -p -v '!flaky'
+
+%install
+# install and rename the library that will be packaged as libcurl-minimal
+make DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" install -C build-minimal/lib
+rm -f ${RPM_BUILD_ROOT}%{_libdir}/libcurl.{la,so}
+for i in ${RPM_BUILD_ROOT}%{_libdir}/*; do
+    mv -v $i $i.minimal
+done
+
+# install and rename the executable that will be packaged as curl-minimal
+make DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" install -C build-minimal/src
+mv -v ${RPM_BUILD_ROOT}%{_bindir}/curl{,.minimal}
+
+# install libcurl.m4
+install -d $RPM_BUILD_ROOT%{_datadir}/aclocal
+install -m 644 docs/libcurl/libcurl.m4 $RPM_BUILD_ROOT%{_datadir}/aclocal
+
+# install the executable and library that will be packaged as curl and libcurl
+cd build-full
+make DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" install
+
+# install zsh completion for curl
+# (we have to override LD_LIBRARY_PATH because we eliminated rpath)
+LD_LIBRARY_PATH="$RPM_BUILD_ROOT%{_libdir}:$LD_LIBRARY_PATH" \
+    make DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" install -C scripts
+
+rm -f ${RPM_BUILD_ROOT}%{_libdir}/libcurl.la
+
+%ldconfig_scriptlets -n libcurl
+
+%ldconfig_scriptlets -n libcurl-minimal
+
+%files
+%doc CHANGES README*
+%doc docs/BUGS docs/FAQ docs/FEATURES
+%doc docs/MANUAL docs/RESOURCES
+%doc docs/TheArtOfHttpScripting docs/TODO
+%{_bindir}/curl
+%{_mandir}/man1/curl.1*
+%{_datadir}/zsh/site-functions
+
+%files -n libcurl
+%license COPYING
+%{_libdir}/libcurl.so.4
+%{_libdir}/libcurl.so.4.[0-9].[0-9]
+
+%files -n libcurl-devel
+%doc docs/examples/*.c docs/examples/Makefile.example docs/INTERNALS.md
+%doc docs/CONTRIBUTE.md docs/libcurl/ABI
+%{_bindir}/curl-config*
+%{_includedir}/curl
+%{_libdir}/*.so
+%{_libdir}/pkgconfig/*.pc
+%{_mandir}/man1/curl-config.1*
+%{_mandir}/man3/*
+%{_datadir}/aclocal/libcurl.m4
+
+%files -n curl-minimal
+%{_bindir}/curl.minimal
+%{_mandir}/man1/curl.1*
+
+%files -n libcurl-minimal
+%license COPYING
+%{_libdir}/libcurl.so.4.minimal
+%{_libdir}/libcurl.so.4.[0-9].[0-9].minimal
+
+%changelog
+* Wed Sep 11 2019 Kamil Dudka <kdudka@redhat.com> - 7.61.1-12
+- double free due to subsequent call of realloc() (CVE-2019-5481)
+- fix heap buffer overflow in function tftp_receive_packet() (CVE-2019-5482)
+- fix TFTP receive buffer overflow (CVE-2019-5436)
+
+* Mon May 13 2019 Kamil Dudka <kdudka@redhat.com> - 7.61.1-11
+- rebuild with updated annobin to prevent Execshield RPMDiff check from failing
+
+* Fri May 10 2019 Kamil Dudka <kdudka@redhat.com> - 7.61.1-10
+- fix SMTP end-of-response out-of-bounds read (CVE-2019-3823)
+- fix NTLMv2 type-3 header stack buffer overflow (CVE-2019-3822)
+- fix NTLM type-2 out-of-bounds buffer read (CVE-2018-16890)
+- xattr: strip credentials from any URL that is stored (CVE-2018-20483)
+
+* Mon Feb 18 2019 Kamil Dudka <kdudka@redhat.com> - 7.61.1-9
+- do not let libssh create a new socket for SCP/SFTP (#1669156)
+
+* Fri Jan 11 2019 Kamil Dudka <kdudka@redhat.com> - 7.61.1-8
+- curl -J: do not append to the destination file (#1660827)
+
+* Thu Nov 15 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.1-7
+- make the patch for CVE-2018-16842 apply properly (CVE-2018-16842)
+
+* Mon Nov 05 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.1-6
+- SASL password overflow via integer overflow (CVE-2018-16839)
+- fix use-after-free in handle close (CVE-2018-16840)
+- fix bad arithmetic when outputting warnings to stderr (CVE-2018-16842)
+
+* Thu Oct 11 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.1-5
+- enable TLS 1.3 post-handshake auth in OpenSSL (#1636900)
+
+* Mon Oct 08 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.1-4
+- make the built-in manual compressed again (#1620217)
+
+* Mon Oct 08 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.1-3
+- update the documentation of --tlsv1.0 in curl(1) man page (#1620217)
+
+* Thu Oct 04 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.1-2
+- enforce versioned libpsl dependency for libcurl (#1631804)
+
+* Thu Oct 04 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.1-1
+- test320: update expected output for gnutls-3.6.4
+- new upstream release (#1625677)
+
+* Thu Aug 09 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.0-5
+- ssl: set engine implicitly when a PKCS#11 URI is provided (#1219544)
+
+* Tue Aug 07 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.0-4
+- relax crypto policy for the test-suite to make it pass again (#1611712)
+
+* Tue Jul 31 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.0-3
+- disable flaky test 1900, which covers deprecated HTTP pipelining
+- adapt test 323 for updated OpenSSL
+
+* Tue Jul 17 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.0-2
+- rebuild against brotli-1.0.5
+
+* Wed Jul 11 2018 Kamil Dudka <kdudka@redhat.com> - 7.61.0-1
+- new upstream release, which fixes the following vulnerability
+    CVE-2018-0500 - SMTP send heap buffer overflow
+
+* Tue Jul 10 2018 Kamil Dudka <kdudka@redhat.com> - 7.60.0-3
+- enable support for brotli compression in libcurl-full
+
+* Wed Jul 04 2018 Kamil Dudka <kdudka@redhat.com> - 7.60.0-2
+- do not hard-wire path of the Python 3 interpreter
+
+* Wed May 16 2018 Kamil Dudka <kdudka@redhat.com> - 7.60.0-1
+- new upstream release, which fixes the following vulnerabilities
+    CVE-2018-1000300 - FTP shutdown response buffer overflow
+    CVE-2018-1000301 - RTSP bad headers buffer over-read
+
+* Thu Mar 15 2018 Kamil Dudka <kdudka@redhat.com> - 7.59.0-3
+- make the test-suite use Python 3
+
+* Wed Mar 14 2018 Kamil Dudka <kdudka@redhat.com> - 7.59.0-2
+- ftp: fix typo in recursive callback detection for seeking
+
+* Wed Mar 14 2018 Kamil Dudka <kdudka@redhat.com> - 7.59.0-1
+- new upstream release, which fixes the following vulnerabilities
+    CVE-2018-1000120 - FTP path trickery leads to NIL byte out of bounds write
+    CVE-2018-1000121 - LDAP NULL pointer dereference
+    CVE-2018-1000122 - RTSP RTP buffer over-read
+
+* Mon Mar 12 2018 Kamil Dudka <kdudka@redhat.com> - 7.58.0-8
+- http2: mark the connection for close on GOAWAY
+
+* Mon Feb 19 2018 Paul Howarth <paul@city-fan.org> - 7.58.0-7
+- Add explicitly-used build requirements
+- Fix libcurl soname version number in %%files list to avoid accidental soname
+  bumps
+
+* Thu Feb 15 2018 Paul Howarth <paul@city-fan.org> - 7.58.0-6
+- switch to %%ldconfig_scriptlets
+- drop legacy BuildRoot: and Group: tags
+- enforce versioned libssh dependency for libcurl
+
+* Tue Feb 13 2018 Kamil Dudka <kdudka@redhat.com> - 7.58.0-5
+- drop temporary workaround for #1540549
+
+* Wed Feb 07 2018 Fedora Release Engineering <releng@fedoraproject.org> - 7.58.0-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild
+
+* Wed Jan 31 2018 Kamil Dudka <kdudka@redhat.com> - 7.58.0-3
+- temporarily work around internal compiler error on x86_64 (#1540549)
+- disable brp-ldconfig to make RemovePathPostfixes work with shared libs again
+
+* Wed Jan 24 2018 Andreas Schneider <asn@redhat.com> - 7.58.0-2
+- use libssh (instead of libssh2) to implement SCP/SFTP in libcurl (#1531483)
+
+* Wed Jan 24 2018 Kamil Dudka <kdudka@redhat.com> - 7.58.0-1
+- new upstream release, which fixes the following vulnerabilities
+    CVE-2018-1000005 - curl: HTTP/2 trailer out-of-bounds read
+    CVE-2018-1000007 - curl: HTTP authentication leak in redirects
+
+* Wed Nov 29 2017 Kamil Dudka <kdudka@redhat.com> - 7.57.0-1
+- new upstream release, which fixes the following vulnerabilities
+    CVE-2017-8816 - curl: NTLM buffer overflow via integer overflow
+    CVE-2017-8817 - curl: FTP wildcard out of bounds read
+    CVE-2017-8818 - curl: SSL out of buffer access
+
+* Mon Oct 23 2017 Kamil Dudka <kdudka@redhat.com> - 7.56.1-1
+- new upstream release (fixes CVE-2017-1000257)
+
+* Wed Oct 04 2017 Kamil Dudka <kdudka@redhat.com> - 7.56.0-1
+- new upstream release (fixes CVE-2017-1000254)
+
+* Mon Aug 28 2017 Kamil Dudka <kdudka@redhat.com> - 7.55.1-5
+- apply the patch for the previous commit and fix its name (#1485702)
+
+* Mon Aug 28 2017 Bastien Nocera <bnocera@redhat.com> - 7.55.1-4
+- Fix NetworkManager connectivity check not working (#1485702)
+
+* Tue Aug 22 2017 Kamil Dudka <kdudka@redhat.com> 7.55.1-3
+- utilize system wide crypto policies for TLS (#1483972)
+
+* Tue Aug 15 2017 Kamil Dudka <kdudka@redhat.com> 7.55.1-2
+- make zsh completion work again
+
+* Mon Aug 14 2017 Kamil Dudka <kdudka@redhat.com> 7.55.1-1
+- new upstream release
+
+* Wed Aug 09 2017 Kamil Dudka <kdudka@redhat.com> 7.55.0-1
+- drop multilib fix for libcurl header files no longer needed
+- new upstream release, which fixes the following vulnerabilities
+    CVE-2017-1000099 - FILE buffer read out of bounds
+    CVE-2017-1000100 - TFTP sends more than buffer size
+    CVE-2017-1000101 - URL globbing out of bounds read
+
+* Wed Aug 02 2017 Fedora Release Engineering <releng@fedoraproject.org> - 7.54.1-8
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
+
+* Fri Jul 28 2017 Florian Weimer <fweimer@redhat.com> - 7.54.1-7
+- Rebuild with fixed binutils (#1475636)
+
+* Fri Jul 28 2017 Igor Gnatenko <ignatenkobrain@fedoraproject.org> - 7.54.1-6
+- Enable separate debuginfo back
+
+* Thu Jul 27 2017 Kamil Dudka <kdudka@redhat.com> 7.54.1-5
+- rebuild to fix broken linkage of cmake on ppc64le
+
+* Wed Jul 26 2017 Kamil Dudka <kdudka@redhat.com> 7.54.1-4
+- avoid build failure caused by broken RPM code that produces debuginfo packages
+
+* Wed Jul 26 2017 Fedora Release Engineering <releng@fedoraproject.org> - 7.54.1-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
+
+* Mon Jun 19 2017 Kamil Dudka <kdudka@redhat.com> 7.54.1-2
+- enforce versioned openssl-libs dependency for libcurl (#1462184)
+
+* Wed Jun 14 2017 Kamil Dudka <kdudka@redhat.com> 7.54.1-1
+- new upstream release
+
+* Tue May 16 2017 Kamil Dudka <kdudka@redhat.com> 7.54.0-5
+- add *-full provides for curl and libcurl to make them explicitly installable
+
+* Thu May 04 2017 Kamil Dudka <kdudka@redhat.com> 7.54.0-4
+- make curl-minimal require a new enough version of libcurl
+
+* Thu Apr 27 2017 Kamil Dudka <kdudka@redhat.com> 7.54.0-3
+- switch the TLS backend back to OpenSSL (#1445153)
+
+* Tue Apr 25 2017 Kamil Dudka <kdudka@redhat.com> 7.54.0-2
+- nss: use libnssckbi.so as the default source of trust
+- nss: do not leak PKCS #11 slot while loading a key (#1444860)
+
+* Thu Apr 20 2017 Kamil Dudka <kdudka@redhat.com> 7.54.0-1
+- new upstream release (fixes CVE-2017-7468)
+
+* Thu Apr 13 2017 Paul Howarth <paul@city-fan.org> 7.53.1-7
+- add %%post and %%postun scriptlets for libcurl-minimal
+- libcurl-minimal provides both libcurl and libcurl%%{?_isa}
+- remove some legacy spec file cruft
+
+* Wed Apr 12 2017 Kamil Dudka <kdudka@redhat.com> 7.53.1-6
+- provide (lib)curl-minimal subpackages with lightweight build of (lib)curl
+
+* Mon Apr 10 2017 Kamil Dudka <kdudka@redhat.com> 7.53.1-5
+- disable upstream test 2033 (flaky test for HTTP/1 pipelining)
+
+* Fri Apr 07 2017 Kamil Dudka <kdudka@redhat.com> 7.53.1-4
+- fix out of bounds read in curl --write-out (CVE-2017-7407)
+
+* Mon Mar 06 2017 Kamil Dudka <kdudka@redhat.com> 7.53.1-3
+- make the dependency on nss-pem arch-specific (#1428550)
+
+* Thu Mar 02 2017 Kamil Dudka <kdudka@redhat.com> 7.53.1-2
+- re-enable valgrind on ix86 because sqlite is fixed (#1428286)
+
+* Fri Feb 24 2017 Kamil Dudka <kdudka@redhat.com> 7.53.1-1
+- new upstream release
+
+* Wed Feb 22 2017 Kamil Dudka <kdudka@redhat.com> 7.53.0-1
+- do not use valgrind on ix86 until sqlite is rebuilt by patched GCC (#1423434)
+- new upstream release (fixes CVE-2017-2629)
+
+* Fri Feb 10 2017 Fedora Release Engineering <releng@fedoraproject.org> - 7.52.1-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
+
+* Fri Dec 23 2016 Kamil Dudka <kdudka@redhat.com> 7.52.1-1
+- new upstream release (fixes CVE-2016-9586)
+
+* Mon Nov 21 2016 Kamil Dudka <kdudka@redhat.com> 7.51.0-3
+- map CURL_SSLVERSION_DEFAULT to NSS default, add support for TLS 1.3 (#1396719)
+
+* Tue Nov 15 2016 Kamil Dudka <kdudka@redhat.com> 7.51.0-2
+- stricter host name checking for file:// URLs
+- ssh: check md5 fingerprints case insensitively
+
+* Wed Nov 02 2016 Kamil Dudka <kdudka@redhat.com> 7.51.0-1
+- temporarily disable failing libidn2 test-cases
+- new upstream release, which fixes the following vulnerabilities
+    CVE-2016-8615 - Cookie injection for other servers
+    CVE-2016-8616 - Case insensitive password comparison
+    CVE-2016-8617 - Out-of-bounds write via unchecked multiplication
+    CVE-2016-8618 - Double-free in curl_maprintf
+    CVE-2016-8619 - Double-free in krb5 code
+    CVE-2016-8620 - Glob parser write/read out of bounds
+    CVE-2016-8621 - curl_getdate out-of-bounds read
+    CVE-2016-8622 - URL unescape heap overflow via integer truncation
+    CVE-2016-8623 - Use-after-free via shared cookies
+    CVE-2016-8624 - Invalid URL parsing with '#'
+    CVE-2016-8625 - IDNA 2003 makes curl use wrong host
+
+* Thu Oct 20 2016 Kamil Dudka <kdudka@redhat.com> 7.50.3-3
+- drop 0103-curl-7.50.0-stunnel.patch no longer needed
+
+* Fri Oct 07 2016 Kamil Dudka <kdudka@redhat.com> 7.50.3-2
+- use the just built version of libcurl while generating zsh completion
+
+* Wed Sep 14 2016 Kamil Dudka <kdudka@redhat.com> 7.50.3-1
+- new upstream release (fixes CVE-2016-7167)
+
+* Wed Sep 07 2016 Kamil Dudka <kdudka@redhat.com> 7.50.2-1
+- new upstream release
+
+* Fri Aug 26 2016 Kamil Dudka <kdudka@redhat.com> 7.50.1-2
+- work around race condition in PK11_FindSlotByName()
+- fix incorrect use of a previously loaded certificate from file
+  (related to CVE-2016-5420)
+
+* Wed Aug 03 2016 Kamil Dudka <kdudka@redhat.com> 7.50.1-1
+- new upstream release (fixes CVE-2016-5419, CVE-2016-5420, and CVE-2016-5421)
+
+* Tue Jul 26 2016 Kamil Dudka <kdudka@redhat.com> 7.50.0-2
+- run HTTP/2 tests on all architectures (#1360319 now worked around in nghttp2)
+
+* Thu Jul 21 2016 Kamil Dudka <kdudka@redhat.com> 7.50.0-1
+- run HTTP/2 tests only on Intel for now to work around #1358845
+- require nss-pem because it is no longer included in the nss package (#1347336)
+- fix HTTPS and FTPS tests (work around stunnel bug #1358810)
+- new upstream release
+
+* Fri Jun 17 2016 Kamil Dudka <kdudka@redhat.com> 7.49.1-3
+- use multilib-rpm-config to install arch-dependent header files
+
+* Fri Jun 03 2016 Kamil Dudka <kdudka@redhat.com> 7.49.1-2
+- fix SIGSEGV of the curl tool while parsing URL with too many globs (#1340757)
+
+* Mon May 30 2016 Kamil Dudka <kdudka@redhat.com> 7.49.1-1
+- new upstream release
+
+* Wed May 18 2016 Kamil Dudka <kdudka@redhat.com> 7.49.0-1
+- new upstream release
+
+* Wed Mar 23 2016 Kamil Dudka <kdudka@redhat.com> 7.48.0-1
+- new upstream release
+
+* Wed Mar 02 2016 Kamil Dudka <kdudka@redhat.com> 7.47.1-4
+- do not refuse cookies for localhost (#1308791)
+
+* Wed Feb 17 2016 Kamil Dudka <kdudka@redhat.com> 7.47.1-3
+- make SCP and SFTP test-cases work with up-to-date OpenSSH
+
+* Wed Feb 10 2016 Kamil Dudka <kdudka@redhat.com> 7.47.1-2
+- enable support for Public Suffix List (#1305701)
+
+* Mon Feb 08 2016 Kamil Dudka <kdudka@redhat.com> 7.47.1-1
+- new upstream release
+
+* Wed Feb 03 2016 Fedora Release Engineering <releng@fedoraproject.org> - 7.47.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Wed Jan 27 2016 Kamil Dudka <kdudka@redhat.com> 7.47.0-1
+- new upstream release (fixes CVE-2016-0755)
+
+* Fri Dec  4 2015 Kamil Dudka <kdudka@redhat.com> 7.46.0-2
+- own /usr/share/zsh/site-functions instead of requiring zsh (#1288529)
+
+* Wed Dec  2 2015 Kamil Dudka <kdudka@redhat.com> 7.46.0-1
+- disable silent builds (suggested by Paul Howarth)
+- use default port numbers when running the upstream test-suite
+- install zsh completion script
+- new upstream release
+
+* Wed Oct  7 2015 Paul Howarth <paul@city-fan.org> 7.45.0-1
+- new upstream release
+- drop %%defattr, redundant since rpm 4.4
+
+* Fri Sep 18 2015 Kamil Dudka <kdudka@redhat.com> 7.44.0-2
+- prevent NSS from incorrectly re-using a session (#1104597)
+
+* Wed Aug 12 2015 Kamil Dudka <kdudka@redhat.com> 7.44.0-1
+- new upstream release
+
+* Thu Jul 30 2015 Kamil Dudka <kdudka@redhat.com> 7.43.0-3
+- prevent dnf from crashing when using both FTP and HTTP (#1248389)
+
+* Thu Jul 16 2015 Kamil Dudka <kdudka@redhat.com> 7.43.0-2
+- build support for the HTTP/2 protocol
+
+* Wed Jun 17 2015 Kamil Dudka <kdudka@redhat.com> 7.43.0-1
+- new upstream release (fixes CVE-2015-3236 and CVE-2015-3237)
+
+* Wed Jun 17 2015 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.42.1-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Fri Jun 05 2015 Kamil Dudka <kdudka@redhat.com> 7.42.1-2
+- curl-config --libs now works on x86_64 without libcurl-devel.x86_64 (#1228363)
+
+* Wed Apr 29 2015 Kamil Dudka <kdudka@redhat.com> 7.42.1-1
+- new upstream release (fixes CVE-2015-3153)
+
+* Wed Apr 22 2015 Kamil Dudka <kdudka@redhat.com> 7.42.0-1
+- new upstream release (fixes CVE-2015-3143, CVE-2015-3144, CVE-2015-3145,
+  and CVE-2015-3148)
+- implement public key pinning for NSS backend (#1195771)
+- do not run flaky test-cases in %%check
+
+* Wed Feb 25 2015 Kamil Dudka <kdudka@redhat.com> 7.41.0-1
+- new upstream release
+- include extern-scan.pl to make test1135 succeed (upstream commit 1514b718)
+
+* Mon Feb 23 2015 Kamil Dudka <kdudka@redhat.com> 7.40.0-3
+- fix a spurious connect failure on dual-stacked hosts (#1187531)
+
+* Sat Feb 21 2015 Till Maas <opensource@till.name> - 7.40.0-2
+- Rebuilt for Fedora 23 Change
+  https://fedoraproject.org/wiki/Changes/Harden_all_packages_with_position-independent_code
+
+* Thu Jan 08 2015 Kamil Dudka <kdudka@redhat.com> 7.40.0-1
+- new upstream release (fixes CVE-2014-8150)
+
+* Wed Nov 05 2014 Kamil Dudka <kdudka@redhat.com> 7.39.0-1
+- new upstream release (fixes CVE-2014-3707)
+
+* Tue Oct 21 2014 Kamil Dudka <kdudka@redhat.com> 7.38.0-2
+- fix a connection failure when FTPS handle is reused
+
+* Wed Sep 10 2014 Kamil Dudka <kdudka@redhat.com> 7.38.0-1
+- new upstream release (fixes CVE-2014-3613 and CVE-2014-3620)
+
+* Sat Aug 16 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.37.1-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Wed Aug 13 2014 Rex Dieter <rdieter@fedoraproject.org> 7.37.1-2
+- include arch'd Requires/Provides
+
+* Wed Jul 16 2014 Kamil Dudka <kdudka@redhat.com> 7.37.1-1
+- new upstream release
+- fix endless loop with GSSAPI proxy auth (patches by David Woodhouse, #1118751)
+
+* Fri Jul 11 2014 Tom Callaway <spot@fedoraproject.org> 7.37.0-4
+- fix license handling
+
+* Fri Jul 04 2014 Kamil Dudka <kdudka@redhat.com> 7.37.0-3
+- various SSL-related fixes (mainly crash on connection failure)
+
+* Sat Jun 07 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.37.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Wed May 21 2014 Kamil Dudka <kdudka@redhat.com> 7.37.0-1
+- new upstream release
+
+* Fri May 09 2014 Kamil Dudka <kdudka@redhat.com> 7.36.0-4
+- fix auth failure on duplicated 'WWW-Authenticate: Negotiate' header (#1093348)
+
+* Fri Apr 25 2014 Kamil Dudka <kdudka@redhat.com> 7.36.0-3
+- nss: implement non-blocking SSL handshake
+
+* Wed Apr 02 2014 Kamil Dudka <kdudka@redhat.com> 7.36.0-2
+- extend URL parser to support IPv6 zone identifiers (#680996)
+
+* Wed Mar 26 2014 Kamil Dudka <kdudka@redhat.com> 7.36.0-1
+- new upstream release (fixes CVE-2014-0138)
+
+* Mon Mar 17 2014 Paul Howarth <paul@city-fan.org> 7.35.0-5
+- add all perl build requirements for the test suite, in a portable way
+
+* Mon Mar 17 2014 Kamil Dudka <kdudka@redhat.com> 7.35.0-4
+- add BR for perl-Digest-MD5, which is required by the test-suite
+
+* Wed Mar 05 2014 Kamil Dudka <kdudka@redhat.com> 7.35.0-3
+- avoid spurious failure of test1086 on s390(x) koji builders (#1072273)
+
+* Tue Feb 25 2014 Kamil Dudka <kdudka@redhat.com> 7.35.0-2
+- refresh expired cookie in test172 from upstream test-suite (#1068967)
+
+* Wed Jan 29 2014 Kamil Dudka <kdudka@redhat.com> 7.35.0-1
+- new upstream release (fixes CVE-2014-0015)
+
+* Wed Dec 18 2013 Kamil Dudka <kdudka@redhat.com> 7.34.0-1
+- new upstream release
+
+* Mon Dec 02 2013 Kamil Dudka <kdudka@redhat.com> 7.33.0-2
+- allow use of TLS > 1.0 if built against recent NSS
+
+* Mon Oct 14 2013 Kamil Dudka <kdudka@redhat.com> 7.33.0-1
+- new upstream release
+- fix missing initialization in NTLM code causing test 906 to fail
+- fix missing initialization in SSH code causing test 619 to fail
+
+* Fri Oct 11 2013 Kamil Dudka <kdudka@redhat.com> 7.32.0-3
+- do not limit the speed of SCP upload on a fast connection
+
+* Mon Sep 09 2013 Kamil Dudka <kdudka@redhat.com> 7.32.0-2
+- avoid delay if FTP is aborted in CURLOPT_HEADERFUNCTION callback (#1005686)
+
+* Mon Aug 12 2013 Kamil Dudka <kdudka@redhat.com> 7.32.0-1
+- new upstream release
+- make sure that NSS is initialized prior to calling PK11_GenerateRandom()
+
+* Sat Aug 03 2013 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.31.0-5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild
+
+* Tue Jul 09 2013 Kamil Dudka <kdudka@redhat.com> 7.31.0-4
+- mention all options listed in 'curl --help' in the curl.1 man page
+
+* Tue Jul 02 2013 Kamil Dudka <kdudka@redhat.com> 7.31.0-3
+- restore the functionality of 'curl -u :'
+
+* Wed Jun 26 2013 Kamil Dudka <kdudka@redhat.com> 7.31.0-2
+- build the curl tool with metalink support
+
+* Sat Jun 22 2013 Kamil Dudka <kdudka@redhat.com> 7.31.0-1
+- new upstream release (fixes CVE-2013-2174)
+
+* Fri Apr 26 2013 Kamil Dudka <kdudka@redhat.com> 7.30.0-2
+- prevent an artificial timeout event due to stale speed-check data (#906031)
+
+* Fri Apr 12 2013 Kamil Dudka <kdudka@redhat.com> 7.30.0-1
+- new upstream release (fixes CVE-2013-1944)
+- prevent test-suite failure due to using non-default port ranges in tests
+
+* Tue Mar 12 2013 Kamil Dudka <kdudka@redhat.com> 7.29.0-4
+- do not ignore poll() failures other than EINTR (#919127)
+- curl_global_init() now accepts the CURL_GLOBAL_ACK_EINTR flag (#919127)
+
+* Wed Mar 06 2013 Kamil Dudka <kdudka@redhat.com> 7.29.0-3
+- switch SSL socket into non-blocking mode after handshake
+- drop the hide_selinux.c hack no longer needed in %%check
+
+* Fri Feb 22 2013 Kamil Dudka <kdudka@redhat.com> 7.29.0-2
+- fix a SIGSEGV when closing an unused multi handle (#914411)
+
+* Wed Feb 06 2013 Kamil Dudka <kdudka@redhat.com> 7.29.0-1
+- new upstream release (fixes CVE-2013-0249)
+
+* Tue Jan 15 2013 Kamil Dudka <kdudka@redhat.com> 7.28.1-3
+- require valgrind for build only on i386 and x86_64 (#886891)
+
+* Tue Jan 15 2013 Kamil Dudka <kdudka@redhat.com> 7.28.1-2
+- prevent NSS from crashing on client auth hook failure
+- clear session cache if a client cert from file is used
+- fix error messages for CURLE_SSL_{CACERT,CRL}_BADFILE
+
+* Tue Nov 20 2012 Kamil Dudka <kdudka@redhat.com> 7.28.1-1
+- new upstream release
+
+* Wed Oct 31 2012 Kamil Dudka <kdudka@redhat.com> 7.28.0-1
+- new upstream release
+
+* Mon Oct 01 2012 Kamil Dudka <kdudka@redhat.com> 7.27.0-3
+- use the upstream facility to disable problematic tests
+- do not crash if MD5 fingerprint is not provided by libssh2
+
+* Wed Aug 01 2012 Kamil Dudka <kdudka@redhat.com> 7.27.0-2
+- eliminate unnecessary inotify events on upload via file protocol (#844385)
+
+* Sat Jul 28 2012 Kamil Dudka <kdudka@redhat.com> 7.27.0-1
+- new upstream release
+
+* Mon Jul 23 2012 Kamil Dudka <kdudka@redhat.com> 7.26.0-6
+- print reason phrase from HTTP status line on error (#676596)
+
+* Wed Jul 18 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.26.0-5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Sat Jun 09 2012 Kamil Dudka <kdudka@redhat.com> 7.26.0-4
+- fix duplicated SSL handshake with multi interface and proxy (#788526)
+
+* Wed May 30 2012 Karsten Hopp <karsten@redhat.com> 7.26.0-3
+- disable test 1319 on ppc64, server times out
+
+* Mon May 28 2012 Kamil Dudka <kdudka@redhat.com> 7.26.0-2
+- use human-readable error messages provided by NSS (upstream commit 72f4b534)
+
+* Fri May 25 2012 Kamil Dudka <kdudka@redhat.com> 7.26.0-1
+- new upstream release
+
+* Wed Apr 25 2012 Karsten Hopp <karsten@redhat.com> 7.25.0-3
+- valgrind on ppc64 works fine, disable ppc32 only
+
+* Wed Apr 25 2012 Karsten Hopp <karsten@redhat.com> 7.25.0-3
+- drop BR valgrind on PPC(64) until bugzilla #810992 gets fixed
+
+* Fri Apr 13 2012 Kamil Dudka <kdudka@redhat.com> 7.25.0-2
+- use NSS_InitContext() to initialize NSS if available (#738456)
+- provide human-readable names for NSS errors (upstream commit a60edcc6)
+
+* Fri Mar 23 2012 Paul Howarth <paul@city-fan.org> 7.25.0-1
+- new upstream release (#806264)
+- fix character encoding of docs with a patch rather than just iconv
+- update debug and multilib patches
+- don't use macros for commands
+- reduce size of %%prep output for readability
+
+* Tue Jan 24 2012 Kamil Dudka <kdudka@redhat.com> 7.24.0-1
+- new upstream release (fixes CVE-2012-0036)
+
+* Thu Jan 05 2012 Paul Howarth <paul@city-fan.org> 7.23.0-6
+- rebuild for gcc 4.7
+
+* Mon Jan 02 2012 Kamil Dudka <kdudka@redhat.com> 7.23.0-5
+- upstream patch that allows running FTPS tests with nss-3.13 (#760060)
+
+* Tue Dec 27 2011 Kamil Dudka <kdudka@redhat.com> 7.23.0-4
+- allow running FTPS tests with nss-3.13 (#760060)
+
+* Sun Dec 25 2011 Kamil Dudka <kdudka@redhat.com> 7.23.0-3
+- avoid unnecessary timeout event when waiting for 100-continue (#767490)
+
+* Mon Nov 21 2011 Kamil Dudka <kdudka@redhat.com> 7.23.0-2
+- curl -JO now uses -O name if no C-D header comes (upstream commit c532604)
+
+* Wed Nov 16 2011 Kamil Dudka <kdudka@redhat.com> 7.23.0-1
+- new upstream release (#754391)
+
+* Mon Sep 19 2011 Kamil Dudka <kdudka@redhat.com> 7.22.0-2
+- nss: select client certificates by DER (#733657)
+
+* Tue Sep 13 2011 Kamil Dudka <kdudka@redhat.com> 7.22.0-1
+- new upstream release
+- curl-config now provides dummy --static-libs option (#733956)
+
+* Sun Aug 21 2011 Paul Howarth <paul@city-fan.org> 7.21.7-4
+- actually fix SIGSEGV of curl -O -J given more than one URL (#723075)
+
+* Mon Aug 15 2011 Kamil Dudka <kdudka@redhat.com> 7.21.7-3
+- fix SIGSEGV of curl -O -J given more than one URL (#723075)
+- introduce the --delegation option of curl (#730444)
+- initialize NSS with no database if the selected database is broken (#728562)
+
+* Wed Aug 03 2011 Kamil Dudka <kdudka@redhat.com> 7.21.7-2
+- add a new option CURLOPT_GSSAPI_DELEGATION (#719939)
+
+* Thu Jun 23 2011 Kamil Dudka <kdudka@redhat.com> 7.21.7-1
+- new upstream release (fixes CVE-2011-2192)
+
+* Wed Jun 08 2011 Kamil Dudka <kdudka@redhat.com> 7.21.6-2
+- avoid an invalid timeout event on a reused handle (#679709)
+
+* Sat Apr 23 2011 Paul Howarth <paul@city-fan.org> 7.21.6-1
+- new upstream release
+
+* Mon Apr 18 2011 Kamil Dudka <kdudka@redhat.com> 7.21.5-2
+- fix the output of curl-config --version (upstream commit 82ecc85)
+
+* Mon Apr 18 2011 Kamil Dudka <kdudka@redhat.com> 7.21.5-1
+- new upstream release
+
+* Sat Apr 16 2011 Peter Robinson <pbrobinson@gmail.com> 7.21.4-4
+- no valgrind on ARMv5 arches
+
+* Sat Mar 05 2011 Dennis Gilmore <dennis@ausil.us> 7.21.4-3
+- no valgrind on sparc arches
+
+* Tue Feb 22 2011 Kamil Dudka <kdudka@redhat.com> 7.21.4-2
+- do not ignore failure of SSL handshake (upstream commit 7aa2d10)
+
+* Fri Feb 18 2011 Kamil Dudka <kdudka@redhat.com> 7.21.4-1
+- new upstream release
+- avoid memory leak on SSL connection failure (upstream commit a40f58d)
+- work around valgrind bug (#678518)
+
+* Tue Feb 08 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.21.3-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Wed Jan 12 2011 Kamil Dudka <kdudka@redhat.com> 7.21.3-2
+- build libcurl with --enable-hidden-symbols
+
+* Thu Dec 16 2010 Paul Howarth <paul@city-fan.org> 7.21.3-1
+- update to 7.21.3:
+  - added --noconfigure switch to testcurl.pl
+  - added --xattr option
+  - added CURLOPT_RESOLVE and --resolve
+  - added CURLAUTH_ONLY
+  - added version-check.pl to the examples dir
+  - check for libcurl features for some command line options
+  - Curl_setopt: disallow CURLOPT_USE_SSL without SSL support
+  - http_chunks: remove debug output
+  - URL-parsing: consider ? a divider
+  - SSH: avoid using the libssh2_ prefix
+  - SSH: use libssh2_session_handshake() to work on win64
+  - ftp: prevent server from hanging on closed data connection when stopping
+    a transfer before the end of the full transfer (ranges)
+  - LDAP: detect non-binary attributes properly
+  - ftp: treat server's response 421 as CURLE_OPERATION_TIMEDOUT
+  - gnutls->handshake: improved timeout handling
+  - security: pass the right parameter to init
+  - krb5: use GSS_ERROR to check for error
+  - TFTP: resend the correct data
+  - configure: fix autoconf 2.68 warning: no AC_LANG_SOURCE call detected
+  - GnuTLS: now detects socket errors on Windows
+  - symbols-in-versions: updated en masse
+  - added a couple of examples that were missing from the tarball
+  - Curl_send/recv_plain: return errno on failure
+  - Curl_wait_for_resolv (for c-ares): correct timeout
+  - ossl_connect_common: detect connection re-use
+  - configure: prevent link errors with --librtmp
+  - openldap: use remote port in URL passed to ldap_init_fd()
+  - url: provide dead_connection flag in Curl_handler::disconnect
+  - lots of compiler warning fixes
+  - ssh: fix a download resume point calculation
+  - fix getinfo CURLINFO_LOCAL* for reused connections
+  - multi: the returned running handles counter could turn negative
+  - multi: only ever consider pipelining for connections doing HTTP(S)
+- drop upstream patches now in tarball
+- update bz650255 and disable-test1112 patches to apply against new codebase
+- add workaround for false-positive glibc-detected buffer overflow in tftpd
+  test server with FORTIFY_SOURCE (similar to #515361)
+
+* Fri Nov 12 2010 Kamil Dudka <kdudka@redhat.com> 7.21.2-5
+- do not send QUIT to a dead FTP control connection (#650255)
+- pull back glibc's implementation of str[n]casecmp(), #626470 appears fixed
+
+* Tue Nov 09 2010 Kamil Dudka <kdudka@redhat.com> 7.21.2-4
+- prevent FTP client from hanging on unrecognized ABOR response (#649347)
+- return a more appropriate error code in case the FTP server session idle
+  timeout has been exceeded (#650255)
+
+* Fri Oct 29 2010 Kamil Dudka <kdudka@redhat.com> 7.21.2-3
+- prevent FTP server from hanging on closed data connection (#643656)
+
+* Thu Oct 14 2010 Paul Howarth <paul@city-fan.org> 7.21.2-2
+- enforce versioned libssh2 dependency for libcurl (#642796)
+
+* Wed Oct 13 2010 Kamil Dudka <kdudka@redhat.com> 7.21.2-1
+- new upstream release, drop applied patches
+- make 0102-curl-7.21.2-debug.patch less intrusive
+
+* Wed Sep 29 2010 jkeating - 7.21.1-6
+- Rebuilt for gcc bug 634757
+
+* Sat Sep 11 2010 Kamil Dudka <kdudka@redhat.com> 7.21.1-5
+- make it possible to run SCP/SFTP tests on x86_64 (#632914)
+
+* Tue Sep 07 2010 Kamil Dudka <kdudka@redhat.com> 7.21.1-4
+- work around glibc/valgrind problem on x86_64 (#631449)
+
+* Tue Aug 24 2010 Paul Howarth <paul@city-fan.org> 7.21.1-3
+- fix up patches so there's no need to run autotools in the rpm build
+- drop buildreq automake
+- drop dependency on automake for devel package from F-14, where
+  %%{_datadir}/aclocal is included in the filesystem package
+- drop dependency on pkgconfig for devel package from F-11, where
+  pkgconfig dependencies are auto-generated
+
+* Mon Aug 23 2010 Kamil Dudka <kdudka@redhat.com> 7.21.1-2
+- re-enable test575 on s390(x), already fixed (upstream commit d63bdba)
+- modify system headers to work around gcc bug (#617757)
+- curl -T now ignores file size of special files (#622520)
+- fix kerberos proxy authentication for https (#625676)
+- work around glibc/valgrind problem on x86_64 (#626470)
+
+* Thu Aug 12 2010 Kamil Dudka <kdudka@redhat.com> 7.21.1-1
+- new upstream release
+
+* Mon Jul 12 2010 Dan Horák <dan[at]danny.cz> 7.21.0-3
+- disable test 575 on s390(x)
+
+* Mon Jun 28 2010 Kamil Dudka <kdudka@redhat.com> 7.21.0-2
+- add support for NTLM authentication (#603783)
+
+* Wed Jun 16 2010 Kamil Dudka <kdudka@redhat.com> 7.21.0-1
+- new upstream release, drop applied patches
+- update of %%description
+- disable valgrind for certain test-cases (libssh2 problem)
+
+* Tue May 25 2010 Kamil Dudka <kdudka@redhat.com> 7.20.1-6
+- fix -J/--remote-header-name to strip CR-LF (upstream patch)
+
+* Wed Apr 28 2010 Kamil Dudka <kdudka@redhat.com> 7.20.1-5
+- CRL support now works again (#581926)
+- make it possible to start a testing OpenSSH server when building with SELinux
+  in the enforcing mode (#521087)
+
+* Sat Apr 24 2010 Kamil Dudka <kdudka@redhat.com> 7.20.1-4
+- upstream patch preventing failure of test536 with threaded DNS resolver
+- upstream patch preventing SSL handshake timeout underflow
+
+* Thu Apr 22 2010 Paul Howarth <paul@city-fan.org> 7.20.1-3
+- replace Rawhide s390-sleep patch with a more targeted patch adding a
+  delay after tests 513 and 514 rather than after all tests
+
+* Wed Apr 21 2010 Kamil Dudka <kdudka@redhat.com> 7.20.1-2
+- experimentally enabled threaded DNS lookup
+- make curl-config multilib ready again (#584107)
+
+* Mon Apr 19 2010 Kamil Dudka <kdudka@redhat.com> 7.20.1-1
+- new upstream release
+
+* Tue Mar 23 2010 Kamil Dudka <kdudka@redhat.com> 7.20.0-4
+- add missing quote in libcurl.m4 (#576252)
+
+* Fri Mar 19 2010 Kamil Dudka <kdudka@redhat.com> 7.20.0-3
+- throw CURLE_SSL_CERTPROBLEM in case peer rejects a certificate (#565972)
+- valgrind temporarily disabled (#574889)
+- kerberos installation prefix has been changed
+
+* Wed Feb 24 2010 Kamil Dudka <kdudka@redhat.com> 7.20.0-2
+- exclude test1112 from the test suite (#565305)
+
+* Thu Feb 11 2010 Kamil Dudka <kdudka@redhat.com> 7.20.0-1
+- new upstream release - added support for IMAP(S), POP3(S), SMTP(S) and RTSP
+- dropped patches applied upstream
+- dropped curl-7.16.0-privlibs.patch no longer useful
+- a new patch forcing -lrt when linking the curl tool and test-cases
+
+* Fri Jan 29 2010 Kamil Dudka <kdudka@redhat.com> 7.19.7-11
+- upstream patch adding a new option -J/--remote-header-name
+- dropped temporary workaround for #545779
+
+* Thu Jan 14 2010 Chris Weyl <cweyl@alumni.drew.edu> 7.19.7-10
+- bump for libssh2 rebuild
+
+* Sun Dec 20 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-9
+- temporary workaround for #548269
+  (restored behavior of 7.19.7-4)
+
+* Wed Dec 09 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-8
+- replace hard wired port numbers in the test suite
+
+* Wed Dec 09 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-7
+- use different port numbers for 32bit and 64bit builds
+- temporary workaround for #545779
+
+* Tue Dec 08 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-6
+- make it possible to run test241
+- re-enable SCP/SFTP tests (#539444)
+
+* Sat Dec 05 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-5
+- avoid use of uninitialized value in lib/nss.c
+- suppress failure of test513 on s390
+
+* Tue Dec 01 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-4
+- do not require valgrind on s390 and s390x
+- temporarily disabled SCP/SFTP test-suite (#539444)
+
+* Thu Nov 12 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-3
+- fix crash on doubly closed NSPR descriptor, patch contributed
+  by Kevin Baughman (#534176)
+- new version of patch for broken TLS servers (#525496, #527771)
+
+* Wed Nov 04 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-2
+- increased release number (CVS problem)
+
+* Wed Nov 04 2009 Kamil Dudka <kdudka@redhat.com> 7.19.7-1
+- new upstream release, dropped applied patches
+- workaround for broken TLS servers (#525496, #527771)
+
+* Wed Oct 14 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-13
+- fix timeout issues and gcc warnings within lib/nss.c
+
+* Tue Oct 06 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-12
+- upstream patch for NSS support written by Guenter Knauf
+
+* Wed Sep 30 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-11
+- build libcurl with c-ares support (#514771)
+
+* Sun Sep 27 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-10
+- require libssh2>=1.2 properly (#525002)
+
+* Sat Sep 26 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-9
+- let curl test-suite use valgrind
+- require libssh2>=1.2 (#525002)
+
+* Mon Sep 21 2009 Chris Weyl <cweyl@alumni.drew.edu> - 7.19.6-8
+- rebuild for libssh2 1.2
+
+* Thu Sep 17 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-7
+- make curl test-suite more verbose
+
+* Wed Sep 16 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-6
+- update polling patch to the latest upstream version
+
+* Thu Sep 03 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-5
+- cover ssh and stunnel support by the test-suite
+
+* Wed Sep 02 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-4
+- use pkg-config to find nss and libssh2 if possible
+- better patch (not only) for SCP/SFTP polling
+- improve error message for not matching common name (#516056)
+
+* Fri Aug 21 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-3
+- avoid tight loop during an SFTP upload
+- http://permalink.gmane.org/gmane.comp.web.curl.library/24744
+
+* Tue Aug 18 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-2
+- let curl package depend on the same version of libcurl
+
+* Fri Aug 14 2009 Kamil Dudka <kdudka@redhat.com> 7.19.6-1
+- new upstream release, dropped applied patches
+- changed NSS code to not ignore the value of ssl.verifyhost and produce more
+  verbose error messages (#516056)
+
+* Wed Aug 12 2009 Ville Skyttä <ville.skytta@iki.fi> - 7.19.5-10
+- Use lzma compressed upstream tarball.
+
+* Fri Jul 24 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.19.5-9
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Wed Jul 22 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-8
+- do not pre-login to all PKCS11 slots as it causes problems with HW tokens
+- try to select client certificate automatically when not specified, thanks
+  to Claes Jakobsson
+
+* Fri Jul 10 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-7
+- fix SIGSEGV when using NSS client certificates, thanks to Claes Jakobsson
+
+* Sun Jul 05 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-6
+- force test suite to use the just built libcurl, thanks to Paul Howarth
+
+* Thu Jul 02 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-5
+- run test suite after build
+- enable built-in manual
+
+* Wed Jun 24 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-4
+- fix bug introduced by the last build (#504857)
+
+* Wed Jun 24 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-3
+- exclude curlbuild.h content from spec (#504857)
+
+* Wed Jun 10 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-2
+- avoid unguarded comparison in the spec file, thanks to R P Herrold (#504857)
+
+* Tue May 19 2009 Kamil Dudka <kdudka@redhat.com> 7.19.5-1
+- update to 7.19.5, dropped applied patches
+
+* Mon May 11 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-11
+- fix infinite loop while loading a private key, thanks to Michael Cronenworth
+  (#453612)
+
+* Mon Apr 27 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-10
+- fix curl/nss memory leaks while using client certificate (#453612, accepted
+  by upstream)
+
+* Wed Apr 22 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-9
+- add missing BuildRequire for autoconf
+
+* Wed Apr 22 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-8
+- fix configure.ac to not discard -g in CFLAGS (#496778)
+
+* Tue Apr 21 2009 Debarshi Ray <rishi@fedoraproject.org> 7.19.4-7
+- Fixed configure to respect the environment's CFLAGS and CPPFLAGS settings.
+
+* Tue Apr 14 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-6
+- upstream patch fixing memory leak in lib/nss.c (#453612)
+- remove redundant dependency of libcurl-devel on libssh2-devel
+
+* Wed Mar 18 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-5
+- enable 6 additional crypto algorithms by default (#436781,
+  accepted by upstream)
+
+* Thu Mar 12 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-4
+- fix memory leak in src/main.c (accepted by upstream)
+- avoid using %%ifarch
+
+* Wed Mar 11 2009 Kamil Dudka <kdudka@redhat.com> 7.19.4-3
+- make libcurl-devel multilib-ready (bug #488922)
+
+* Fri Mar 06 2009 Jindrich Novy <jnovy@redhat.com> 7.19.4-2
+- drop .easy-leak patch, causes problems in pycurl (#488791)
+- fix libcurl-devel dependencies (#488895)
+
+* Tue Mar 03 2009 Jindrich Novy <jnovy@redhat.com> 7.19.4-1
+- update to 7.19.4 (fixes CVE-2009-0037)
+- fix leak in curl_easy* functions, thanks to Kamil Dudka
+- drop nss-fix patch, applied upstream
+
+* Tue Feb 24 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 7.19.3-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild
+
+* Tue Feb 17 2009 Kamil Dudka <kdudka@redhat.com> 7.19.3-1
+- update to 7.19.3, dropped applied nss patches
+- add patch fixing 7.19.3 curl/nss bugs
+
+* Mon Dec 15 2008 Jindrich Novy <jnovy@redhat.com> 7.18.2-9
+- rebuild for f10/rawhide cvs tag clashes
+
+* Sat Dec 06 2008 Jindrich Novy <jnovy@redhat.com> 7.18.2-8
+- use improved NSS patch, thanks to Rob Crittenden (#472489)
+
+* Tue Sep 09 2008 Jindrich Novy <jnovy@redhat.com> 7.18.2-7
+- update the thread safety patch, thanks to Rob Crittenden (#462217)
+
+* Wed Sep 03 2008 Warren Togami <wtogami@redhat.com> 7.18.2-6
+- add thread safety to libcurl NSS cleanup() functions (#459297)
+
+* Fri Aug 22 2008 Tom "spot" Callaway <tcallawa@redhat.com> 7.18.2-5
+- undo mini libcurl.so.3
+
+* Mon Aug 11 2008 Tom "spot" Callaway <tcallawa@redhat.com> 7.18.2-4
+- make miniature library for libcurl.so.3
+
+* Fri Jul  4 2008 Jindrich Novy <jnovy@redhat.com> 7.18.2-3
+- enable support for libssh2 (#453958)
+
+* Wed Jun 18 2008 Jindrich Novy <jnovy@redhat.com> 7.18.2-2
+- fix curl_multi_perform() over a proxy (#450140), thanks to
+  Rob Crittenden
+
+* Wed Jun  4 2008 Jindrich Novy <jnovy@redhat.com> 7.18.2-1
+- update to 7.18.2
+
+* Wed May  7 2008 Jindrich Novy <jnovy@redhat.com> 7.18.1-2
+- spec cleanup, thanks to Paul Howarth (#225671)
+  - drop BR: libtool
+  - convert CHANGES and README to UTF-8
+  - _GNU_SOURCE in CFLAGS is no longer needed
+  - remove bogus rpath
+
+* Mon Mar 31 2008 Jindrich Novy <jnovy@redhat.com> 7.18.1-1
+- update to curl 7.18.1 (fixes #397911)
+- add ABI docs for libcurl
+- remove --static-libs from curl-config
+- drop curl-config patch, obsoleted by @SSL_ENABLED@ autoconf
+  substitution (#432667)
+
+* Fri Feb 15 2008 Jindrich Novy <jnovy@redhat.com> 7.18.0-2
+- define _GNU_SOURCE so that NI_MAXHOST gets defined from glibc
+
+* Mon Jan 28 2008 Jindrich Novy <jnovy@redhat.com> 7.18.0-1
+- update to curl-7.18.0
+- drop sslgen patch -> applied upstream
+- fix typo in description
+
+* Tue Jan 22 2008 Jindrich Novy <jnovy@redhat.com> 7.17.1-6
+- fix curl-devel obsoletes so that we don't break F8->F9 upgrade
+  path (#429612)
+
+* Tue Jan  8 2008 Jindrich Novy <jnovy@redhat.com> 7.17.1-5
+- do not attempt to close a bad socket (#427966),
+  thanks to Caolan McNamara
+
+* Tue Dec  4 2007 Jindrich Novy <jnovy@redhat.com> 7.17.1-4
+- rebuild because of the openldap soname bump
+- remove old nsspem patch
+
+* Fri Nov 30 2007 Jindrich Novy <jnovy@redhat.com> 7.17.1-3
+- drop useless ldap library detection since curl doesn't
+  dlopen() it but links to it -> BR: openldap-devel
+- enable LDAPS support (#225671), thanks to Paul Howarth
+- BR: krb5-devel to reenable GSSAPI support
+- simplify build process
+- update description
+
+* Wed Nov 21 2007 Jindrich Novy <jnovy@redhat.com> 7.17.1-2
+- update description to contain complete supported servers list (#393861)
+
+* Sat Nov 17 2007 Jindrich Novy <jnovy@redhat.com> 7.17.1-1
+- update to curl 7.17.1
+- include patch to enable SSL usage in NSS when a socket is opened
+  nonblocking, thanks to Rob Crittenden (rcritten@redhat.com)
+
+* Wed Oct 24 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-10
+- correctly provide/obsolete curl-devel (#130251)
+
+* Wed Oct 24 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-9
+- create libcurl and libcurl-devel subpackages (#130251)
+
+* Thu Oct 11 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-8
+- list features correctly when curl is compiled against NSS (#316191)
+
+* Mon Sep 17 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-7
+- add zlib-devel BR to enable gzip compressed transfers in curl (#292211)
+
+* Mon Sep 10 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-6
+- provide webclient (#225671)
+
+* Thu Sep  6 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-5
+- add support for the NSS PKCS#11 pem reader so the command-line is the
+  same for both OpenSSL and NSS by Rob Crittenden (rcritten@redhat.com)
+- switch to NSS again
+
+* Mon Sep  3 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-4
+- revert back to use OpenSSL (#266021)
+
+* Mon Aug 27 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-3
+- don't use openssl, use nss instead
+
+* Fri Aug 10 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-2
+- fix anonymous ftp login (#251570), thanks to David Cantrell
+
+* Wed Jul 11 2007 Jindrich Novy <jnovy@redhat.com> 7.16.4-1
+- update to 7.16.4
+
+* Mon Jun 25 2007 Jindrich Novy <jnovy@redhat.com> 7.16.3-1
+- update to 7.16.3
+- drop .print patch, applied upstream
+- next series of merge review fixes by Paul Howarth
+- remove aclocal stuff, no more needed
+- simplify makefile arguments
+- don't reference standard library paths in libcurl.pc
+- include docs/CONTRIBUTE
+
+* Mon Jun 18 2007 Jindrich Novy <jnovy@redhat.com> 7.16.2-5
+- don't print like crazy (#236981), backported from upstream CVS
+
+* Fri Jun 15 2007 Jindrich Novy <jnovy@redhat.com> 7.16.2-4
+- another series of review fixes (#225671),
+  thanks to Paul Howarth
+- check version of ldap library automatically
+- don't use %%makeinstall and preserve timestamps
+- drop useless patches
+
+* Fri May 11 2007 Jindrich Novy <jnovy@redhat.com> 7.16.2-3
+- add automake BR to curl-devel to fix aclocal dir. ownership,
+  thanks to Patrice Dumas
+
+* Thu May 10 2007 Jindrich Novy <jnovy@redhat.com> 7.16.2-2
+- package libcurl.m4 in curl-devel (#239664), thanks to Quy Tonthat
+
+* Wed Apr 11 2007 Jindrich Novy <jnovy@redhat.com> 7.16.2-1
+- update to 7.16.2
+
+* Mon Feb 19 2007 Jindrich Novy <jnovy@redhat.com> 7.16.1-3
+- don't create/ship static libraries (#225671)
+
+* Mon Feb  5 2007 Jindrich Novy <jnovy@redhat.com> 7.16.1-2
+- merge review related spec fixes (#225671)
+
+* Mon Jan 29 2007 Jindrich Novy <jnovy@redhat.com> 7.16.1-1
+- update to 7.16.1
+
+* Tue Jan 16 2007 Jindrich Novy <jnovy@redhat.com> 7.16.0-5
+- don't package generated makefiles for docs/examples to avoid
+  multilib conflicts
+
+* Mon Dec 18 2006 Jindrich Novy <jnovy@redhat.com> 7.16.0-4
+- convert spec to UTF-8
+- don't delete BuildRoot in %%prep phase
+- rpmlint fixes
+
+* Thu Nov 16 2006 Jindrich Novy <jnovy@redhat.com> - 7.16.0-3
+- prevent curl from dlopen()ing missing ldap libraries so that
+  ldap:// requests work (#215928)
+
+* Tue Oct 31 2006 Jindrich Novy <jnovy@redhat.com> - 7.16.0-2
+- fix BuildRoot
+- add Requires: pkgconfig for curl-devel
+- move LDFLAGS and LIBS to Libs.private in libcurl.pc.in (#213278)
+
+* Mon Oct 30 2006 Jindrich Novy <jnovy@redhat.com> - 7.16.0-1
+- update to curl-7.16.0
+
+* Thu Aug 24 2006 Jindrich Novy <jnovy@redhat.com> - 7.15.5-1.fc6
+- update to curl-7.15.5
+- use %%{?dist}
+
+* Fri Jun 30 2006 Ivana Varekova <varekova@redhat.com> - 7.15.4-1
+- update to 7.15.4
+
+* Mon Mar 20 2006 Ivana Varekova <varekova@redhat.com> - 7.15.3-1
+- fix multilib problem using pkg-config
+- update to 7.15.3
+
+* Thu Feb 23 2006 Ivana Varekova <varekova@redhat.com> - 7.15.1-2
+- fix multilib problem (#181290):
+  curl-devel.i386 not installable together with curl-devel.x86_64
+
+* Fri Feb 10 2006 Jesse Keating <jkeating@redhat.com> - 7.15.1-1.2.1
+- bump again for double-long bug on ppc(64)
+
+* Tue Feb 07 2006 Jesse Keating <jkeating@redhat.com> - 7.15.1-1.2
+- rebuilt for new gcc4.1 snapshot and glibc changes
+
+* Fri Dec 09 2005 Jesse Keating <jkeating@redhat.com>
+- rebuilt
+
+* Thu Dec  8 2005 Ivana Varekova <varekova@redhat.com> 7.15.1-1
+- update to 7.15.1 (bug 175191)
+
+* Wed Nov 30 2005 Ivana Varekova <varekova@redhat.com> 7.15.0-3
+- fix curl-config bug 174556 - missing vernum value
+
+* Wed Nov  9 2005 Ivana Varekova <varekova@redhat.com> 7.15.0-2
+- rebuilt
+
+* Tue Oct 18 2005 Ivana Varekova <varekova@redhat.com> 7.15.0-1
+- update to 7.15.0
+
+* Thu Oct 13 2005 Ivana Varekova <varekova@redhat.com> 7.14.1-1
+- update to 7.14.1
+
+* Thu Jun 16 2005 Ivana Varekova <varekova@redhat.com> 7.14.0-1
+- rebuild for new version
+
+* Tue May 03 2005 Ivana Varekova <varekova@redhat.com> 7.13.1-3
+- fix bug 150768 - curl-7.12.3-2 breaks basic authentication
+  used Daniel Stenberg's patch
+
+* Mon Apr 25 2005 Joe Orton <jorton@redhat.com> 7.13.1-2
+- update to use ca-bundle in /etc/pki
+- mark License as MIT not MPL
+
+* Wed Mar  9 2005 Ivana Varekova <varekova@redhat.com> 7.13.1-1
+- rebuilt (7.13.1)
+
+* Tue Mar  1 2005 Tomas Mraz <tmraz@redhat.com> 7.13.0-2
+- rebuild with openssl-0.9.7e
+
+* Sun Feb 13 2005 Florian La Roche <laroche@redhat.com>
+- 7.13.0
+
+* Wed Feb  9 2005 Joe Orton <jorton@redhat.com> 7.12.3-3
+- don't pass /usr to --with-libidn to remove "-L/usr/lib" from
+  'curl-config --libs' output on x86_64.
+
+* Fri Jan 28 2005 Adrian Havill <havill@redhat.com> 7.12.3-1
+- Upgrade to 7.12.3, which uses poll() for FDSETSIZE limit (#134794)
+- require libidn-devel for devel subpkg (#141341)
+- remove proftpd kludge; included upstream
+
+* Wed Oct 06 2004 Adrian Havill <havill@redhat.com> 7.12.1-1
+- upgrade to 7.12.1
+- enable GSSAPI auth (#129353)
+- enable I18N domain names (#134595)
+- workaround for broken ProFTPD SSL auth (#134133). Thanks to
+  Aleksandar Milivojevic
+
+* Wed Sep 29 2004 Adrian Havill <havill@redhat.com> 7.12.0-4
+- move new docs position so defattr gets applied
+
+* Mon Sep 27 2004 Warren Togami <wtogami@redhat.com> 7.12.0-3
+- remove INSTALL, move libcurl docs to -devel
+
+* Mon Jul 26 2004 Jindrich Novy <jnovy@redhat.com>
+- updated to 7.12.0
+- updated nousr patch
+
+* Tue Jun 15 2004 Elliot Lee <sopwith@redhat.com>
+- rebuilt
+
+* Wed Apr 07 2004 Adrian Havill <havill@redhat.com> 7.11.1-1
+- upgraded; updated nousr patch
+- added COPYING (#115956)
+
+* Tue Mar 02 2004 Elliot Lee <sopwith@redhat.com>
+- rebuilt
+
+* Fri Feb 13 2004 Elliot Lee <sopwith@redhat.com>
+- rebuilt
+
+* Sat Jan 31 2004 Florian La Roche <Florian.LaRoche@redhat.de>
+- update to 7.10.8
+- remove patch2, already upstream
+
+* Wed Oct 15 2003 Adrian Havill <havill@redhat.com> 7.10.6-7
+- aclocal before libtoolize
+- move OpenLDAP license so it's present as a doc file in
+  both the source and binary, as per license conditions
+
+* Mon Oct 13 2003 Adrian Havill <havill@redhat.com> 7.10.6-6
+- add OpenLDAP copyright notice for usage of code, add OpenLDAP
+  license for this code
+
+* Tue Oct 07 2003 Adrian Havill <havill@redhat.com> 7.10.6-5
+- match serverAltName certs with SSL (#106168)
+
+* Tue Sep 16 2003 Adrian Havill <havill@redhat.com> 7.10.6-4.1
+- bump n-v-r for RHEL
+
+* Tue Sep 16 2003 Adrian Havill <havill@redhat.com> 7.10.6-4
+- restore ca cert bundle (#104400)
+- require openssl since we want to use its ca-cert bundle
+
+* Sun Sep  7 2003 Joe Orton <jorton@redhat.com> 7.10.6-3
+- rebuild
+
+* Fri Sep  5 2003 Joe Orton <jorton@redhat.com> 7.10.6-2.2
+- fix to include libcurl.so
+
+* Mon Aug 25 2003 Adrian Havill <havill@redhat.com> 7.10.6-2.1
+- bump n-v-r for RHEL
+
+* Mon Aug 25 2003 Adrian Havill <havill@redhat.com> 7.10.6-2
+- devel subpkg needs openssl-devel as a Require (#102963)
+
+* Mon Jul 28 2003 Adrian Havill <havill@redhat.com> 7.10.6-1
+- bumped version
+
+* Tue Jul 01 2003 Adrian Havill <havill@redhat.com> 7.10.5-1
+- bumped version
+
+* Wed Jun 04 2003 Elliot Lee <sopwith@redhat.com>
+- rebuilt
+
+* Sat Apr 12 2003 Florian La Roche <Florian.LaRoche@redhat.de>
+- update to 7.10.4
+- adapt nousr patch
+
+* Wed Jan 22 2003 Tim Powers <timp@redhat.com>
+- rebuilt
+
+* Tue Jan 21 2003 Joe Orton <jorton@redhat.com> 7.9.8-4
+- don't add -L/usr/lib to 'curl-config --libs' output
+
+* Tue Jan  7 2003 Nalin Dahyabhai <nalin@redhat.com> 7.9.8-3
+- rebuild
+
+* Wed Nov  6 2002 Joe Orton <jorton@redhat.com> 7.9.8-2
+- fix `curl-config --libs` output for libdir!=/usr/lib
+- remove docs/LIBCURL from docs list; remove unpackaged libcurl.la
+- libtoolize and reconf
+
+* Mon Jul 22 2002 Trond Eivind Glomsrød <teg@redhat.com> 7.9.8-1
+- 7.9.8 (#69473)
+
+* Fri Jun 21 2002 Tim Powers <timp@redhat.com>
+- automated rebuild
+
+* Sun May 26 2002 Tim Powers <timp@redhat.com>
+- automated rebuild
+
+* Thu May 16 2002 Trond Eivind Glomsrød <teg@redhat.com> 7.9.7-1
+- 7.9.7
+
+* Wed Apr 24 2002 Trond Eivind Glomsrød <teg@redhat.com> 7.9.6-1
+- 7.9.6
+
+* Thu Mar 21 2002 Trond Eivind Glomsrød <teg@redhat.com> 7.9.5-2
+- Stop the curl-config script from printing -I/usr/include 
+  and -L/usr/lib (#59497)
+
+* Fri Mar  8 2002 Trond Eivind Glomsrød <teg@redhat.com> 7.9.5-1
+- 7.9.5
+
+* Tue Feb 26 2002 Trond Eivind Glomsrød <teg@redhat.com> 7.9.3-2
+- Rebuild
+
+* Wed Jan 23 2002 Nalin Dahyabhai <nalin@redhat.com> 7.9.3-1
+- update to 7.9.3
+
+* Wed Jan 09 2002 Tim Powers <timp@redhat.com> 7.9.2-2
+- automated rebuild
+
+* Wed Jan  9 2002 Trond Eivind Glomsrød <teg@redhat.com> 7.9.2-1
+- 7.9.2
+
+* Fri Aug 17 2001 Nalin Dahyabhai <nalin@redhat.com>
+- include curl-config in curl-devel
+- update to 7.8 to fix memory leak and strlcat() symbol pollution from libcurl
+
+* Wed Jul 18 2001 Crutcher Dunnavant <crutcher@redhat.com>
+- added openssl-devel build req
+
+* Mon May 21 2001 Tim Powers <timp@redhat.com>
+- built for the distro
+
+* Tue Apr 24 2001 Jeff Johnson <jbj@redhat.com>
+- upgrade to curl-7.7.2.
+- enable IPv6.
+
+* Fri Mar  2 2001 Tim Powers <timp@redhat.com>
+- rebuilt against openssl-0.9.6-1
+
+* Thu Jan  4 2001 Tim Powers <timp@redhat.com>
+- fixed missing ldconfigs
+- updated to 7.5.2, bug fixes
+
+* Mon Dec 11 2000 Tim Powers <timp@redhat.com>
+- updated to 7.5.1
+
+* Mon Nov  6 2000 Tim Powers <timp@redhat.com>
+- update to 7.4.1 to fix bug #20337, problems with curl -c
+- not using patch anymore, it's included in the new source. Keeping
+  for reference
+
+* Fri Oct 20 2000 Nalin Dahyabhai <nalin@redhat.com>
+- fix bogus req in -devel package
+
+* Fri Oct 20 2000 Tim Powers <timp@redhat.com> 
+- devel package needed defattr so that root owns the files
+
+* Mon Oct 16 2000 Nalin Dahyabhai <nalin@redhat.com>
+- update to 7.3
+- apply vsprintf/vsnprintf patch from Colin Phipps via Debian
+
+* Mon Aug 21 2000 Nalin Dahyabhai <nalin@redhat.com>
+- enable SSL support
+- fix packager tag
+- move buildroot to %%{_tmppath}
+
+* Tue Aug 1 2000 Tim Powers <timp@redhat.com>
+- fixed vendor tag for bug #15028
+
+* Mon Jul 24 2000 Prospector <prospector@redhat.com>
+- rebuilt
+
+* Tue Jul 11 2000 Tim Powers <timp@redhat.com>
+- work around alpha build problems with optimizations
+
+* Mon Jul 10 2000 Tim Powers <timp@redhat.com>
+- rebuilt
+
+* Mon Jun 5 2000 Tim Powers <timp@redhat.com>
+- put man pages in correct place
+- use %%makeinstall
+
+* Mon Apr 24 2000 Tim Powers <timp@redhat.com>
+- updated to 6.5.2
+
+* Wed Nov 3 1999 Tim Powers <timp@redhat.com>
+- updated sources to 6.2
+- gzip man page
+
+* Mon Aug 30 1999 Tim Powers <timp@redhat.com>
+- changed group
+
+* Thu Aug 26 1999 Tim Powers <timp@redhat.com>
+- changelog started
+- general cleanups, changed prefix to /usr, added manpage to files section
+- including in Powertools