author    Elliott Hughes <enh@google.com>  2017-09-20 17:00:17 -0700
committer Elliott Hughes <enh@google.com>  2017-09-21 09:38:15 -0700
commit    82be86df6ec7baa34d6169c053fd1dfe56fa858e (patch)
tree      afd04b931f1f59dfe81b0daa450306ff0204319b /lib/http.c
parent    bfa505f61d33429f40bf843b7871fc793dd017d4 (diff)
Update libcurl from 7.54.1 to 7.55.1.
Bug: http://b/64610131
Test: builds, boots, `vendor/google/tools/fake-ota on streaming` works
Change-Id: I7ecaf9c83e9496ac4a379507791bec637deaa4cb
Diffstat (limited to 'lib/http.c')
-rw-r--r--  lib/http.c  257
1 file changed, 148 insertions, 109 deletions
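
One of the changes in the diff below tightens the parsing of the HTTP response status line: instead of sscanf(" HTTP/%d.%d %d", ...), the new code also reads the single character between the version and the status code and only accepts the line when that separator is a space, so input such as "HTTP/1.1200" is rejected. The following is a standalone sketch of that parsing idea in plain C, using a hypothetical parse_status_line() helper rather than curl's actual function:

#include <stdio.h>

/* Sketch of the stricter status-line parse shown in the diff below:
   require a literal space between the HTTP version and the status code,
   so a malformed line like "HTTP/1.1200 OK" no longer passes. */
static int parse_status_line(const char *line, int *httpcode)
{
  int major = 0, minor = 0, code = 0;
  char separator = 0;
  int nc = sscanf(line, " HTTP/%1d.%1d%c%3d",
                  &major, &minor, &separator, &code);

  if((nc == 4) && (separator == ' ')) {
    *httpcode = code;
    return 1; /* got version major.minor and a three-digit status code */
  }
  return 0;   /* malformed or unsupported status line */
}

int main(void)
{
  int code;
  printf("%d\n", parse_status_line("HTTP/1.1 200 OK", &code)); /* prints 1 */
  printf("%d\n", parse_status_line("HTTP/1.1200 OK", &code));  /* prints 0 */
  return 0;
}
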
diff --git a/lib/http.c b/lib/http.c
index 36317f56..d66b8482 100644
--- a/lib/http.c
+++ b/lib/http.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -119,6 +119,7 @@ const struct Curl_handler Curl_handler_http = {
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* connection_check */
PORT_HTTP, /* defport */
CURLPROTO_HTTP, /* protocol */
PROTOPT_CREDSPERREQUEST /* flags */
@@ -143,6 +144,7 @@ const struct Curl_handler Curl_handler_https = {
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* connection_check */
PORT_HTTPS, /* defport */
CURLPROTO_HTTPS, /* protocol */
PROTOPT_SSL | PROTOPT_CREDSPERREQUEST | PROTOPT_ALPN_NPN /* flags */
@@ -285,6 +287,7 @@ static CURLcode http_output_basic(struct connectdata *conn, bool proxy)
const char *user;
const char *pwd;
CURLcode result;
+ char *out;
if(proxy) {
userp = &conn->allocptr.proxyuserpwd;
@@ -297,26 +300,32 @@ static CURLcode http_output_basic(struct connectdata *conn, bool proxy)
pwd = conn->passwd;
}
- snprintf(data->state.buffer, sizeof(data->state.buffer), "%s:%s", user, pwd);
+ out = aprintf("%s:%s", user, pwd);
+ if(!out)
+ return CURLE_OUT_OF_MEMORY;
- result = Curl_base64_encode(data,
- data->state.buffer, strlen(data->state.buffer),
- &authorization, &size);
+ result = Curl_base64_encode(data, out, strlen(out), &authorization, &size);
if(result)
- return result;
+ goto fail;
- if(!authorization)
- return CURLE_REMOTE_ACCESS_DENIED;
+ if(!authorization) {
+ result = CURLE_REMOTE_ACCESS_DENIED;
+ goto fail;
+ }
free(*userp);
*userp = aprintf("%sAuthorization: Basic %s\r\n",
proxy ? "Proxy-" : "",
authorization);
free(authorization);
- if(!*userp)
- return CURLE_OUT_OF_MEMORY;
+ if(!*userp) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+ }
- return CURLE_OK;
+ fail:
+ free(out);
+ return result;
}
/* pickoneauth() selects the most favourable authentication method from the
@@ -412,8 +421,6 @@ static CURLcode http_perhapsrewind(struct connectdata *conn)
case HTTPREQ_POST:
if(data->state.infilesize != -1)
expectsend = data->state.infilesize;
- else if(data->set.postfields)
- expectsend = (curl_off_t)strlen(data->set.postfields);
break;
case HTTPREQ_PUT:
if(data->state.infilesize != -1)
@@ -545,8 +552,8 @@ CURLcode Curl_http_auth_act(struct connectdata *conn)
}
}
if(http_should_fail(conn)) {
- failf (data, "The requested URL returned error: %d",
- data->req.httpcode);
+ failf(data, "The requested URL returned error: %d",
+ data->req.httpcode);
result = CURLE_HTTP_RETURNED_ERROR;
}
@@ -644,10 +651,10 @@ output_auth_headers(struct connectdata *conn,
proxy ? "Proxy" : "Server", auth,
proxy ? (conn->http_proxy.user ? conn->http_proxy.user : "") :
(conn->user ? conn->user : ""));
- authstatus->multi = (!authstatus->done) ? TRUE : FALSE;
+ authstatus->multipass = (!authstatus->done) ? TRUE : FALSE;
}
else
- authstatus->multi = FALSE;
+ authstatus->multipass = FALSE;
return CURLE_OK;
}
@@ -840,9 +847,11 @@ CURLcode Curl_http_input_auth(struct connectdata *conn, bool proxy,
auth += strlen("NTLM");
while(*auth && ISSPACE(*auth))
auth++;
- if(*auth)
- if((conn->challenge_header = strdup(auth)) == NULL)
+ if(*auth) {
+ conn->challenge_header = strdup(auth);
+ if(!conn->challenge_header)
return CURLE_OUT_OF_MEMORY;
+ }
}
}
#endif
@@ -1108,7 +1117,7 @@ CURLcode Curl_add_buffer_send(Curl_send_buffer *in,
buffer is using this size.
*/
- sendsize = (size > CURL_MAX_WRITE_SIZE) ? CURL_MAX_WRITE_SIZE : size;
+ sendsize = CURLMIN(size, CURL_MAX_WRITE_SIZE);
/* OpenSSL is very picky and we must send the SAME buffer pointer to the
library when we attempt to re-send this buffer. Sending the same data
@@ -1194,8 +1203,7 @@ CURLcode Curl_add_buffer_send(Curl_send_buffer *in,
This needs FIXing.
*/
return CURLE_SEND_ERROR;
- else
- Curl_pipeline_leave_write(conn);
+ Curl_pipeline_leave_write(conn);
}
}
Curl_add_buffer_free(in);
@@ -1356,14 +1364,18 @@ CURLcode Curl_http_connect(struct connectdata *conn, bool *done)
if(result)
return result;
+ if(conn->bits.proxy_connect_closed)
+ /* this is not an error, just part of the connection negotiation */
+ return CURLE_OK;
+
if(CONNECT_FIRSTSOCKET_PROXY_SSL())
return CURLE_OK; /* wait for HTTPS proxy SSL initialization to complete */
- if(conn->tunnel_state[FIRSTSOCKET] == TUNNEL_CONNECT)
+ if(!Curl_connect_complete(conn))
/* nothing else to do except wait right now - we're not done here. */
return CURLE_OK;
- if(conn->given->flags & PROTOPT_SSL) {
+ if(conn->given->protocol & CURLPROTO_HTTPS) {
/* perform SSL initialization */
result = https_connecting(conn, done);
if(result)
@@ -1423,7 +1435,10 @@ CURLcode Curl_http_done(struct connectdata *conn,
struct Curl_easy *data = conn->data;
struct HTTP *http = data->req.protop;
- infof(data, "Curl_http_done: called premature == %d\n", premature);
+ /* Clear multipass flag. If authentication isn't done yet, then it will get
+ * a chance to be set back to true when we output the next auth header */
+ data->state.authhost.multipass = FALSE;
+ data->state.authproxy.multipass = FALSE;
Curl_unencode_cleanup(conn);
@@ -1509,6 +1524,20 @@ static bool use_http_1_1plus(const struct Curl_easy *data,
(data->set.httpversion >= CURL_HTTP_VERSION_1_1));
}
+static const char *get_http_string(const struct Curl_easy *data,
+ const struct connectdata *conn)
+{
+#ifdef USE_NGHTTP2
+ if(conn->proto.httpc.h2)
+ return "2";
+#endif
+
+ if(use_http_1_1plus(data, conn))
+ return "1.1";
+
+ return "1.0";
+}
+
/* check and possibly add an Expect: header */
static CURLcode expect100(struct Curl_easy *data,
struct connectdata *conn,
@@ -1666,9 +1695,10 @@ CURLcode Curl_add_timecondition(struct Curl_easy *data,
Curl_send_buffer *req_buffer)
{
const struct tm *tm;
- char *buf = data->state.buffer;
struct tm keeptime;
CURLcode result;
+ char datestr[80];
+ const char *condp;
if(data->set.timecondition == CURL_TIMECOND_NONE)
/* no condition was asked for */
@@ -1681,6 +1711,21 @@ CURLcode Curl_add_timecondition(struct Curl_easy *data,
}
tm = &keeptime;
+ switch(data->set.timecondition) {
+ default:
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+
+ case CURL_TIMECOND_IFMODSINCE:
+ condp = "If-Modified-Since";
+ break;
+ case CURL_TIMECOND_IFUNMODSINCE:
+ condp = "If-Unmodified-Since";
+ break;
+ case CURL_TIMECOND_LASTMOD:
+ condp = "Last-Modified";
+ break;
+ }
+
/* The If-Modified-Since header family should have their times set in
* GMT as RFC2616 defines: "All HTTP date/time stamps MUST be
* represented in Greenwich Mean Time (GMT), without exception. For the
@@ -1689,8 +1734,9 @@ CURLcode Curl_add_timecondition(struct Curl_easy *data,
*/
/* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
- snprintf(buf, BUFSIZE-1,
- "%s, %02d %s %4d %02d:%02d:%02d GMT",
+ snprintf(datestr, sizeof(datestr),
+ "%s: %s, %02d %s %4d %02d:%02d:%02d GMT\r\n",
+ condp,
Curl_wkday[tm->tm_wday?tm->tm_wday-1:6],
tm->tm_mday,
Curl_month[tm->tm_mon],
@@ -1699,22 +1745,7 @@ CURLcode Curl_add_timecondition(struct Curl_easy *data,
tm->tm_min,
tm->tm_sec);
- switch(data->set.timecondition) {
- default:
- break;
- case CURL_TIMECOND_IFMODSINCE:
- result = Curl_add_bufferf(req_buffer,
- "If-Modified-Since: %s\r\n", buf);
- break;
- case CURL_TIMECOND_IFUNMODSINCE:
- result = Curl_add_bufferf(req_buffer,
- "If-Unmodified-Since: %s\r\n", buf);
- break;
- case CURL_TIMECOND_LASTMOD:
- result = Curl_add_bufferf(req_buffer,
- "Last-Modified: %s\r\n", buf);
- break;
- }
+ result = Curl_add_buffer(req_buffer, datestr, strlen(datestr));
return result;
}
@@ -1822,6 +1853,9 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
case HTTPREQ_PUT:
request = "PUT";
break;
+ case HTTPREQ_OPTIONS:
+ request = "OPTIONS";
+ break;
default: /* this should never happen */
case HTTPREQ_GET:
request = "GET";
@@ -1847,7 +1881,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
if(result)
return result;
- if((data->state.authhost.multi || data->state.authproxy.multi) &&
+ if((data->state.authhost.multipass || data->state.authproxy.multipass) &&
(httpreq != HTTPREQ_GET) &&
(httpreq != HTTPREQ_HEAD)) {
/* Auth is required and we are not authenticated yet. Make a PUT or POST
@@ -2063,7 +2097,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
/* when doing ftp, append ;type=<a|i> if not present */
char *type = strstr(ppath, ";type=");
if(type && type[6] && type[7] == 0) {
- switch (Curl_raw_toupper(type[6])) {
+ switch(Curl_raw_toupper(type[6])) {
case 'A':
case 'D':
case 'I':
@@ -2132,32 +2166,32 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
}
if(seekerr != CURL_SEEKFUNC_OK) {
+ curl_off_t passed=0;
+
if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
failf(data, "Could not seek stream");
return CURLE_READ_ERROR;
}
/* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
- else {
- curl_off_t passed=0;
- do {
- size_t readthisamountnow =
- (data->state.resume_from - passed > CURL_OFF_T_C(BUFSIZE)) ?
- BUFSIZE : curlx_sotouz(data->state.resume_from - passed);
-
- size_t actuallyread =
- data->state.fread_func(data->state.buffer, 1, readthisamountnow,
- data->state.in);
-
- passed += actuallyread;
- if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
- /* this checks for greater-than only to make sure that the
- CURL_READFUNC_ABORT return code still aborts */
- failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
- " bytes from the input", passed);
- return CURLE_READ_ERROR;
- }
- } while(passed < data->state.resume_from);
- }
+ do {
+ size_t readthisamountnow =
+ (data->state.resume_from - passed > data->set.buffer_size) ?
+ (size_t)data->set.buffer_size :
+ curlx_sotouz(data->state.resume_from - passed);
+
+ size_t actuallyread =
+ data->state.fread_func(data->state.buffer, 1, readthisamountnow,
+ data->state.in);
+
+ passed += actuallyread;
+ if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
+ /* this checks for greater-than only to make sure that the
+ CURL_READFUNC_ABORT return code still aborts */
+ failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
+ " bytes from the input", passed);
+ return CURLE_READ_ERROR;
+ }
+ } while(passed < data->state.resume_from);
}
/* now, decrease the size of the read */
@@ -2223,9 +2257,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
}
}
- /* Use 1.1 unless the user specifically asked for 1.0 or the server only
- supports 1.0 */
- httpstring= use_http_1_1plus(data, conn)?"1.1":"1.0";
+ httpstring = get_http_string(data, conn);
/* initialize a dynamic send-buffer */
req_buffer = Curl_add_buffer_init();
@@ -2239,6 +2271,9 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
if(result)
return result;
+ if(data->set.str[STRING_TARGET])
+ ppath = data->set.str[STRING_TARGET];
+
/* url */
if(paste_ftp_userpwd)
result = Curl_add_bufferf(req_buffer, "ftp://%s:%s@%s",
@@ -2292,20 +2327,10 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
te
);
- /* clear userpwd to avoid re-using credentials from re-used connections */
+ /* clear userpwd and proxyuserpwd to avoid re-using old credentials
+ * from re-used connections */
Curl_safefree(conn->allocptr.userpwd);
-
- /*
- * Free proxyuserpwd for Negotiate/NTLM. Cannot reuse as it is associated
- * with the connection and shouldn't be repeated over it either.
- */
- switch (data->state.authproxy.picked) {
- case CURLAUTH_NEGOTIATE:
- case CURLAUTH_NTLM:
- case CURLAUTH_NTLM_WB:
- Curl_safefree(conn->allocptr.proxyuserpwd);
- break;
- }
+ Curl_safefree(conn->allocptr.proxyuserpwd);
if(result)
return result;
@@ -2499,7 +2524,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
postsize = data->state.infilesize;
if((postsize != -1) && !data->req.upload_chunky &&
- !Curl_checkheaders(conn, "Content-Length:")) {
+ (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length:"))) {
/* only add Content-Length if not uploading chunked */
result = Curl_add_bufferf(req_buffer,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T
@@ -2540,18 +2565,15 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
if(conn->bits.authneg)
postsize = 0;
- else {
- /* figure out the size of the postfields */
- postsize = (data->state.infilesize != -1)?
- data->state.infilesize:
- (data->set.postfields? (curl_off_t)strlen(data->set.postfields):-1);
- }
+ else
+ /* the size of the post body */
+ postsize = data->state.infilesize;
/* We only set Content-Length and allow a custom Content-Length if
we don't upload data chunked, as RFC2616 forbids us to set both
kinds of headers (Transfer-Encoding: chunked and Content-Length) */
if((postsize != -1) && !data->req.upload_chunky &&
- !Curl_checkheaders(conn, "Content-Length:")) {
+ (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length:"))) {
/* we allow replacing this header if not during auth negotiation,
although it isn't very wise to actually set your own */
result = Curl_add_bufferf(req_buffer,
@@ -2578,7 +2600,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
data->state.expect100header =
Curl_compareheader(ptr, "Expect:", "100-continue");
}
- else if(postsize > TINY_INITIAL_POST_SIZE || postsize < 0) {
+ else if(postsize > EXPECT_100_THRESHOLD || postsize < 0) {
result = expect100(data, conn, req_buffer);
if(result)
return result;
@@ -2727,6 +2749,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
data->req.upload_done = TRUE;
data->req.keepon &= ~KEEP_SEND; /* we're done writing */
data->req.exp100 = EXP100_SEND_DATA; /* already sent */
+ Curl_expire_done(data, EXPIRE_100_TIMEOUT);
}
}
@@ -2753,7 +2776,7 @@ checkhttpprefix(struct Curl_easy *data,
/* convert from the network encoding using a scratch area */
char *scratch = strdup(s);
if(NULL == scratch) {
- failf (data, "Failed to allocate memory for conversion!");
+ failf(data, "Failed to allocate memory for conversion!");
return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
}
if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
@@ -2791,7 +2814,7 @@ checkrtspprefix(struct Curl_easy *data,
/* convert from the network encoding using a scratch area */
char *scratch = strdup(s);
if(NULL == scratch) {
- failf (data, "Failed to allocate memory for conversion!");
+ failf(data, "Failed to allocate memory for conversion!");
return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
}
if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
@@ -2805,8 +2828,7 @@ checkrtspprefix(struct Curl_easy *data,
#endif /* CURL_DOES_CONVERSIONS */
if(checkprefix("RTSP/", s))
return TRUE;
- else
- return FALSE;
+ return FALSE;
}
#endif /* CURL_DISABLE_RTSP */
@@ -2843,8 +2865,8 @@ static CURLcode header_append(struct Curl_easy *data,
/* The reason to have a max limit for this is to avoid the risk of a bad
server feeding libcurl with a never-ending header that will cause
reallocs infinitely */
- failf (data, "Avoided giant realloc for header (max is %d)!",
- CURL_MAX_HTTP_HEADER);
+ failf(data, "Avoided giant realloc for header (max is %d)!",
+ CURL_MAX_HTTP_HEADER);
return CURLE_OUT_OF_MEMORY;
}
@@ -2852,7 +2874,7 @@ static CURLcode header_append(struct Curl_easy *data,
hbufp_index = k->hbufp - data->state.headerbuff;
newbuff = realloc(data->state.headerbuff, newsize);
if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
+ failf(data, "Failed to alloc memory for big header!");
return CURLE_OUT_OF_MEMORY;
}
data->state.headersize=newsize;
@@ -3024,6 +3046,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
if(k->exp100 > EXP100_SEND_DATA) {
k->exp100 = EXP100_SEND_DATA;
k->keepon |= KEEP_SEND;
+ Curl_expire_done(data, EXPIRE_100_TIMEOUT);
}
break;
case 101:
@@ -3093,8 +3116,8 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
* up and return an error.
*/
if(http_should_fail(conn)) {
- failf (data, "The requested URL returned error: %d",
- k->httpcode);
+ failf(data, "The requested URL returned error: %d",
+ k->httpcode);
return CURLE_HTTP_RETURNED_ERROR;
}
@@ -3150,6 +3173,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
* request body has been sent we stop sending and mark the
* connection for closure after we've read the entire response.
*/
+ Curl_expire_done(data, EXPIRE_100_TIMEOUT);
if(!k->upload_done) {
if(data->set.http_keep_sending_on_error) {
infof(data, "HTTP error before end of send, keep sending\n");
@@ -3227,9 +3251,17 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
k->maxdownload = k->size;
}
- /* If max download size is *zero* (nothing) we already
- have nothing and can safely return ok now! */
- if(0 == k->maxdownload)
+ /* If max download size is *zero* (nothing) we already have
+ nothing and can safely return ok now! But for HTTP/2, we'd
+ like to call http2_handle_stream_close to properly close a
+ stream. In order to do this, we keep reading until we
+ close the stream. */
+ if(0 == k->maxdownload
+#if defined(USE_NGHTTP2)
+ && !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
+ conn->httpversion == 20)
+#endif
+ )
*stop_reading = TRUE;
if(*stop_reading) {
@@ -3286,23 +3318,26 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
/*
* https://tools.ietf.org/html/rfc7230#section-3.1.2
*
- * The reponse code is always a three-digit number in HTTP as the spec
+ * The response code is always a three-digit number in HTTP as the spec
* says. We try to allow any number here, but we cannot make
* guarantees on future behaviors since it isn't within the protocol.
*/
+ char separator;
nc = sscanf(HEADER1,
- " HTTP/%d.%d %d",
+ " HTTP/%1d.%1d%c%3d",
&httpversion_major,
&conn->httpversion,
+ &separator,
&k->httpcode);
if(nc == 1 && httpversion_major == 2 &&
1 == sscanf(HEADER1, " HTTP/2 %d", &k->httpcode)) {
conn->httpversion = 0;
- nc = 3;
+ nc = 4;
+ separator = ' ';
}
- if(nc==3) {
+ if((nc==4) && (' ' == separator)) {
conn->httpversion += 10 * httpversion_major;
if(k->upgr101 == UPGR101_RECEIVED) {
@@ -3311,7 +3346,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
infof(data, "Lying server, not serving HTTP/2\n");
}
}
- else {
+ else if(!nc) {
/* this is the real world, not a Nirvana
NCSA 1.5.x returns this crap when asked for HTTP/1.1
*/
@@ -3329,6 +3364,10 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
}
}
}
+ else {
+ failf(data, "Unsupported HTTP version in response\n");
+ return CURLE_UNSUPPORTED_PROTOCOL;
+ }
}
else if(conn->handler->protocol & CURLPROTO_RTSP) {
nc = sscanf(HEADER1,