path: root/lib/http.c
Diffstat (limited to 'lib/http.c')
-rw-r--r--  lib/http.c  339
1 file changed, 164 insertions, 175 deletions
diff --git a/lib/http.c b/lib/http.c
index 378d8f77..36317f56 100644
--- a/lib/http.c
+++ b/lib/http.c
@@ -53,7 +53,6 @@
#include "progress.h"
#include "curl_base64.h"
#include "cookie.h"
-#include "strequal.h"
#include "vauth/vauth.h"
#include "vtls/vtls.h"
#include "http_digest.h"
@@ -68,7 +67,7 @@
#include "parsedate.h" /* for the week day and month names */
#include "strtoofft.h"
#include "multiif.h"
-#include "rawstr.h"
+#include "strcase.h"
#include "content_encoding.h"
#include "http_proxy.h"
#include "warnless.h"
@@ -77,6 +76,7 @@
#include "pipeline.h"
#include "http2.h"
#include "connect.h"
+#include "strdup.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
@@ -182,7 +182,7 @@ char *Curl_checkheaders(const struct connectdata *conn,
struct Curl_easy *data = conn->data;
for(head = data->set.headers;head; head=head->next) {
- if(Curl_raw_nequal(head->data, thisheader, thislen))
+ if(strncasecompare(head->data, thisheader, thislen))
return head->data;
}
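
Note: this hunk (and several below) replaces Curl_raw_nequal()/Curl_raw_equal() from the removed rawstr.h with strncasecompare()/strcasecompare() from the new strcase.h. The point of these helpers is a locale-independent, ASCII-only case-insensitive match for protocol tokens such as header names. A minimal standalone sketch of that idea, assuming nothing about curl's actual strcase.c (raw_toupper and ascii_ncaseequal are illustrative names, not curl APIs):

#include <stddef.h>
#include <stdio.h>

/* ASCII-only upper-casing, deliberately independent of the current locale */
static char raw_toupper(char in)
{
  if(in >= 'a' && in <= 'z')
    return (char)(in - ('a' - 'A'));
  return in;
}

/* Case-insensitive comparison of at most n bytes; returns nonzero on match,
   mirroring the "equal means true" convention of the curl helpers. */
static int ascii_ncaseequal(const char *first, const char *second, size_t n)
{
  while(n--) {
    if(raw_toupper(*first) != raw_toupper(*second))
      return 0;
    if(!*first)        /* both strings ended within the first n bytes */
      return 1;
    first++;
    second++;
  }
  return 1;
}

int main(void)
{
  /* matches the header-lookup use above: compare only the header-name prefix */
  printf("%d\n", ascii_ncaseequal("Content-Type: text/html",
                                  "CONTENT-TYPE:", 13)); /* prints 1 */
  return 0;
}
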
@@ -208,7 +208,7 @@ char *Curl_checkProxyheaders(const struct connectdata *conn,
for(head = (conn->bits.proxy && data->set.sep_headers) ?
data->set.proxyheaders : data->set.headers;
head; head=head->next) {
- if(Curl_raw_nequal(head->data, thisheader, thislen))
+ if(strncasecompare(head->data, thisheader, thislen))
return head->data;
}
@@ -288,8 +288,8 @@ static CURLcode http_output_basic(struct connectdata *conn, bool proxy)
if(proxy) {
userp = &conn->allocptr.proxyuserpwd;
- user = conn->proxyuser;
- pwd = conn->proxypasswd;
+ user = conn->http_proxy.user;
+ pwd = conn->http_proxy.passwd;
}
else {
userp = &conn->allocptr.userpwd;
@@ -462,7 +462,7 @@ static CURLcode http_perhapsrewind(struct connectdata *conn)
#endif
/* This is not NTLM or many bytes left to send: close */
- connclose(conn, "Mid-auth HTTP and much data left to send");
+ streamclose(conn, "Mid-auth HTTP and much data left to send");
data->req.size = 0; /* don't download any more than 0 bytes */
/* There still is data left to send, but this connection is marked for
@@ -642,7 +642,7 @@ output_auth_headers(struct connectdata *conn,
if(auth) {
infof(data, "%s auth using %s with user '%s'\n",
proxy ? "Proxy" : "Server", auth,
- proxy ? (conn->proxyuser ? conn->proxyuser : "") :
+ proxy ? (conn->http_proxy.user ? conn->http_proxy.user : "") :
(conn->user ? conn->user : ""));
authstatus->multi = (!authstatus->done) ? TRUE : FALSE;
}
@@ -726,7 +726,7 @@ Curl_http_output_auth(struct connectdata *conn,
conn->bits.netrc ||
!data->state.first_host ||
data->set.http_disable_hostname_check_before_authentication ||
- Curl_raw_equal(data->state.first_host, conn->host.name)) {
+ strcasecompare(data->state.first_host, conn->host.name)) {
result = output_auth_headers(conn, authhost, request, path, FALSE);
}
else
@@ -784,23 +784,27 @@ CURLcode Curl_http_input_auth(struct connectdata *conn, bool proxy,
while(*auth) {
#ifdef USE_SPNEGO
if(checkprefix("Negotiate", auth)) {
- *availp |= CURLAUTH_NEGOTIATE;
- authp->avail |= CURLAUTH_NEGOTIATE;
-
- if(authp->picked == CURLAUTH_NEGOTIATE) {
- if(negdata->state == GSS_AUTHSENT || negdata->state == GSS_AUTHNONE) {
- CURLcode result = Curl_input_negotiate(conn, proxy, auth);
- if(!result) {
- DEBUGASSERT(!data->req.newurl);
- data->req.newurl = strdup(data->change.url);
- if(!data->req.newurl)
- return CURLE_OUT_OF_MEMORY;
- data->state.authproblem = FALSE;
- /* we received a GSS auth token and we dealt with it fine */
- negdata->state = GSS_AUTHRECV;
+ if((authp->avail & CURLAUTH_NEGOTIATE) ||
+ Curl_auth_is_spnego_supported()) {
+ *availp |= CURLAUTH_NEGOTIATE;
+ authp->avail |= CURLAUTH_NEGOTIATE;
+
+ if(authp->picked == CURLAUTH_NEGOTIATE) {
+ if(negdata->state == GSS_AUTHSENT ||
+ negdata->state == GSS_AUTHNONE) {
+ CURLcode result = Curl_input_negotiate(conn, proxy, auth);
+ if(!result) {
+ DEBUGASSERT(!data->req.newurl);
+ data->req.newurl = strdup(data->change.url);
+ if(!data->req.newurl)
+ return CURLE_OUT_OF_MEMORY;
+ data->state.authproblem = FALSE;
+ /* we received a GSS auth token and we dealt with it fine */
+ negdata->state = GSS_AUTHRECV;
+ }
+ else
+ data->state.authproblem = TRUE;
}
- else
- data->state.authproblem = TRUE;
}
}
}
@@ -809,39 +813,44 @@ CURLcode Curl_http_input_auth(struct connectdata *conn, bool proxy,
#ifdef USE_NTLM
/* NTLM support requires the SSL crypto libs */
if(checkprefix("NTLM", auth)) {
- *availp |= CURLAUTH_NTLM;
- authp->avail |= CURLAUTH_NTLM;
- if(authp->picked == CURLAUTH_NTLM ||
- authp->picked == CURLAUTH_NTLM_WB) {
- /* NTLM authentication is picked and activated */
- CURLcode result = Curl_input_ntlm(conn, proxy, auth);
- if(!result) {
- data->state.authproblem = FALSE;
+ if((authp->avail & CURLAUTH_NTLM) ||
+ (authp->avail & CURLAUTH_NTLM_WB) ||
+ Curl_auth_is_ntlm_supported()) {
+ *availp |= CURLAUTH_NTLM;
+ authp->avail |= CURLAUTH_NTLM;
+
+ if(authp->picked == CURLAUTH_NTLM ||
+ authp->picked == CURLAUTH_NTLM_WB) {
+ /* NTLM authentication is picked and activated */
+ CURLcode result = Curl_input_ntlm(conn, proxy, auth);
+ if(!result) {
+ data->state.authproblem = FALSE;
#ifdef NTLM_WB_ENABLED
- if(authp->picked == CURLAUTH_NTLM_WB) {
- *availp &= ~CURLAUTH_NTLM;
- authp->avail &= ~CURLAUTH_NTLM;
- *availp |= CURLAUTH_NTLM_WB;
- authp->avail |= CURLAUTH_NTLM_WB;
-
- /* Get the challenge-message which will be passed to
- * ntlm_auth for generating the type 3 message later */
- while(*auth && ISSPACE(*auth))
- auth++;
- if(checkprefix("NTLM", auth)) {
- auth += strlen("NTLM");
+ if(authp->picked == CURLAUTH_NTLM_WB) {
+ *availp &= ~CURLAUTH_NTLM;
+ authp->avail &= ~CURLAUTH_NTLM;
+ *availp |= CURLAUTH_NTLM_WB;
+ authp->avail |= CURLAUTH_NTLM_WB;
+
+ /* Get the challenge-message which will be passed to
+ * ntlm_auth for generating the type 3 message later */
while(*auth && ISSPACE(*auth))
auth++;
- if(*auth)
- if((conn->challenge_header = strdup(auth)) == NULL)
- return CURLE_OUT_OF_MEMORY;
+ if(checkprefix("NTLM", auth)) {
+ auth += strlen("NTLM");
+ while(*auth && ISSPACE(*auth))
+ auth++;
+ if(*auth)
+ if((conn->challenge_header = strdup(auth)) == NULL)
+ return CURLE_OUT_OF_MEMORY;
+ }
}
- }
#endif
- }
- else {
- infof(data, "Authentication problem. Ignoring this.\n");
- data->state.authproblem = TRUE;
+ }
+ else {
+ infof(data, "Authentication problem. Ignoring this.\n");
+ data->state.authproblem = TRUE;
+ }
}
}
}
@@ -849,18 +858,18 @@ CURLcode Curl_http_input_auth(struct connectdata *conn, bool proxy,
#endif
#ifndef CURL_DISABLE_CRYPTO_AUTH
if(checkprefix("Digest", auth)) {
- if((authp->avail & CURLAUTH_DIGEST) != 0) {
+ if((authp->avail & CURLAUTH_DIGEST) != 0)
infof(data, "Ignoring duplicate digest auth header.\n");
- }
- else {
+ else if(Curl_auth_is_digest_supported()) {
CURLcode result;
+
*availp |= CURLAUTH_DIGEST;
authp->avail |= CURLAUTH_DIGEST;
/* We call this function on input Digest headers even if Digest
* authentication isn't activated yet, as we need to store the
- * incoming data from this header in case we are gonna use
- * Digest. */
+ * incoming data from this header in case we are going to use
+ * Digest */
result = Curl_input_digest(conn, proxy, auth);
if(result) {
infof(data, "Authentication problem. Ignoring this.\n");
@@ -1090,7 +1099,9 @@ CURLcode Curl_add_buffer_send(Curl_send_buffer *in,
return result;
}
- if((conn->handler->flags & PROTOPT_SSL) && conn->httpversion != 20) {
+ if((conn->handler->flags & PROTOPT_SSL ||
+ conn->http_proxy.proxytype == CURLPROXY_HTTPS)
+ && conn->httpversion != 20) {
/* We never send more than CURL_MAX_WRITE_SIZE bytes in one single chunk
when we speak HTTPS, as if only a fraction of it is sent now, this data
needs to fit into the normal read-callback buffer later on and that
@@ -1247,14 +1258,13 @@ CURLcode Curl_add_buffer(Curl_send_buffer *in, const void *inptr, size_t size)
if(in->buffer)
/* we have a buffer, enlarge the existing one */
- new_rb = realloc(in->buffer, new_size);
+ new_rb = Curl_saferealloc(in->buffer, new_size);
else
/* create a new buffer */
new_rb = malloc(new_size);
if(!new_rb) {
/* If we failed, we cleanup the whole buffer and return error */
- Curl_safefree(in->buffer);
free(in);
return CURLE_OUT_OF_MEMORY;
}
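
Note: this hunk swaps a bare realloc() for Curl_saferealloc() and drops the explicit Curl_safefree(in->buffer). A failed realloc() leaves the original block allocated, so the old code had to free it separately to avoid a leak. A hedged sketch of such a wrapper, assuming it mirrors the intent of Curl_saferealloc rather than its exact code:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Minimal leak-avoiding realloc wrapper (an assumption about the intent
   behind Curl_saferealloc, not a copy of it). On failure the original block
   is freed and NULL returned, so the caller must only keep the returned
   pointer and never reuse the old one. */
static void *safe_realloc(void *ptr, size_t size)
{
  void *datap = realloc(ptr, size);
  if(!datap)
    free(ptr);   /* a failed realloc() leaves ptr allocated; release it */
  return datap;
}

int main(void)
{
  char *buf = malloc(16);
  if(!buf)
    return 1;
  strcpy(buf, "hello");
  buf = safe_realloc(buf, 1024);   /* old pointer must not be reused */
  if(!buf)
    return 1;                      /* nothing leaked even on failure */
  printf("%s\n", buf);
  free(buf);
  return 0;
}
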
@@ -1296,7 +1306,7 @@ Curl_compareheader(const char *headerline, /* line to check */
const char *start;
const char *end;
- if(!Curl_raw_nequal(headerline, header, hlen))
+ if(!strncasecompare(headerline, header, hlen))
return FALSE; /* doesn't start with header */
/* pass the header */
@@ -1322,7 +1332,7 @@ Curl_compareheader(const char *headerline, /* line to check */
/* find the content string in the rest of the line */
for(;len>=clen;len--, start++) {
- if(Curl_raw_nequal(start, content, clen))
+ if(strncasecompare(start, content, clen))
return TRUE; /* match! */
}
@@ -1342,10 +1352,13 @@ CURLcode Curl_http_connect(struct connectdata *conn, bool *done)
connkeep(conn, "HTTP default");
/* the CONNECT procedure might not have been completed */
- result = Curl_proxy_connect(conn);
+ result = Curl_proxy_connect(conn, FIRSTSOCKET);
if(result)
return result;
+ if(CONNECT_FIRSTSOCKET_PROXY_SSL())
+ return CURLE_OK; /* wait for HTTPS proxy SSL initialization to complete */
+
if(conn->tunnel_state[FIRSTSOCKET] == TUNNEL_CONNECT)
/* nothing else to do except wait right now - we're not done here. */
return CURLE_OK;
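
Note: the extra FIRSTSOCKET argument to Curl_proxy_connect() and the CONNECT_FIRSTSOCKET_PROXY_SSL() early return belong to the HTTPS-proxy work visible throughout this patch (conn->http_proxy, CURLPROXY_HTTPS): the TLS handshake towards the proxy itself may still be in progress when this code runs. From the application side this mode is selected by giving CURLOPT_PROXY an https:// URL; a small usage sketch (host and port are placeholders, and it assumes a libcurl built with HTTPS-proxy support, 7.52.0 or later):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    CURLcode res;
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    /* speak TLS to the proxy itself (CURLPROXY_HTTPS); placeholder host */
    curl_easy_setopt(curl, CURLOPT_PROXY, "https://proxy.example.com:3128");
    res = curl_easy_perform(curl);
    if(res != CURLE_OK)
      fprintf(stderr, "transfer failed: %s\n", curl_easy_strerror(res));
    curl_easy_cleanup(curl);
  }
  return 0;
}
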
@@ -1388,50 +1401,16 @@ static CURLcode https_connecting(struct connectdata *conn, bool *done)
return result;
}
-#endif
-#if defined(USE_OPENSSL) || defined(USE_GNUTLS) || defined(USE_SCHANNEL) || \
- defined(USE_DARWINSSL) || defined(USE_POLARSSL) || defined(USE_NSS) || \
- defined(USE_MBEDTLS)
-/* This function is for OpenSSL, GnuTLS, darwinssl, schannel and polarssl only.
- It should be made to query the generic SSL layer instead. */
static int https_getsock(struct connectdata *conn,
curl_socket_t *socks,
int numsocks)
{
- if(conn->handler->flags & PROTOPT_SSL) {
- struct ssl_connect_data *connssl = &conn->ssl[FIRSTSOCKET];
-
- if(!numsocks)
- return GETSOCK_BLANK;
-
- if(connssl->connecting_state == ssl_connect_2_writing) {
- /* write mode */
- socks[0] = conn->sock[FIRSTSOCKET];
- return GETSOCK_WRITESOCK(0);
- }
- else if(connssl->connecting_state == ssl_connect_2_reading) {
- /* read mode */
- socks[0] = conn->sock[FIRSTSOCKET];
- return GETSOCK_READSOCK(0);
- }
- }
-
- return CURLE_OK;
-}
-#else
-#ifdef USE_SSL
-static int https_getsock(struct connectdata *conn,
- curl_socket_t *socks,
- int numsocks)
-{
- (void)conn;
- (void)socks;
- (void)numsocks;
+ if(conn->handler->flags & PROTOPT_SSL)
+ return Curl_ssl_getsock(conn, socks, numsocks);
return GETSOCK_BLANK;
}
#endif /* USE_SSL */
-#endif /* USE_OPENSSL || USE_GNUTLS || USE_SCHANNEL */
/*
* Curl_http_done() gets called after a single HTTP request has been
@@ -1443,9 +1422,8 @@ CURLcode Curl_http_done(struct connectdata *conn,
{
struct Curl_easy *data = conn->data;
struct HTTP *http = data->req.protop;
-#ifdef USE_NGHTTP2
- struct http_conn *httpc = &conn->proto.httpc;
-#endif
+
+ infof(data, "Curl_http_done: called premature == %d\n", premature);
Curl_unencode_cleanup(conn);
@@ -1458,7 +1436,7 @@ CURLcode Curl_http_done(struct connectdata *conn,
* Do not close CONNECT_ONLY connections. */
if((data->req.httpcode != 401) && (data->req.httpcode != 407) &&
!data->set.connect_only)
- connclose(conn, "Negotiate transfer completed");
+ streamclose(conn, "Negotiate transfer completed");
Curl_cleanup_negotiate(data);
}
#endif
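
Note: several hunks in this patch (here, at the "Mid-auth HTTP" site, and at the "no end-of-message indicator" site below) replace connclose() with streamclose(). As far as the surrounding code shows, the intent is that on a multiplexed HTTP/2 connection only the affected stream is marked for closing while the connection stays reusable, and on HTTP/1.x the effect is the same as before. A simplified, hypothetical model of that distinction (not curl's connect.h macros, which also carry a reason string for verbose logging):

#include <stdio.h>
#include <stdbool.h>

struct connection {
  bool multiplexed;   /* e.g. an HTTP/2 connection carrying many streams */
  bool close_conn;    /* whole connection marked for closing */
  bool close_stream;  /* only the current stream marked for closing */
};

static void conn_close(struct connection *c, const char *reason)
{
  c->close_conn = true;
  printf("marking connection for close: %s\n", reason);
}

static void stream_close(struct connection *c, const char *reason)
{
  if(c->multiplexed) {
    c->close_stream = true;  /* keep the connection for other streams */
    printf("marking stream for close: %s\n", reason);
  }
  else
    conn_close(c, reason);   /* HTTP/1.x: identical to the old behaviour */
}

int main(void)
{
  struct connection h2 = { true, false, false };
  struct connection h1 = { false, false, false };
  stream_close(&h2, "Negotiate transfer completed");
  stream_close(&h1, "Negotiate transfer completed");
  printf("h2: conn=%d stream=%d, h1: conn=%d stream=%d\n",
         h2.close_conn, h2.close_stream, h1.close_conn, h1.close_stream);
  return 0;
}
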
@@ -1475,27 +1453,7 @@ CURLcode Curl_http_done(struct connectdata *conn,
http->send_buffer = NULL; /* clear the pointer */
}
-#ifdef USE_NGHTTP2
- if(http->header_recvbuf) {
- DEBUGF(infof(data, "free header_recvbuf!!\n"));
- Curl_add_buffer_free(http->header_recvbuf);
- http->header_recvbuf = NULL; /* clear the pointer */
- Curl_add_buffer_free(http->trailer_recvbuf);
- http->trailer_recvbuf = NULL; /* clear the pointer */
- if(http->push_headers) {
- /* if they weren't used and then freed before */
- for(; http->push_headers_used > 0; --http->push_headers_used) {
- free(http->push_headers[http->push_headers_used - 1]);
- }
- free(http->push_headers);
- http->push_headers = NULL;
- }
- }
- if(http->stream_id) {
- nghttp2_session_set_stream_user_data(httpc->h2, http->stream_id, 0);
- http->stream_id = 0;
- }
-#endif
+ Curl_http2_done(conn, premature);
if(HTTPREQ_POST_FORM == data->set.httpreq) {
data->req.bytecount = http->readbytecount + http->writebytecount;
@@ -1660,6 +1618,10 @@ CURLcode Curl_add_custom_headers(struct connectdata *conn,
Connection: */
checkprefix("Connection", headers->data))
;
+ else if((conn->httpversion == 20) &&
+ checkprefix("Transfer-Encoding:", headers->data))
+ /* HTTP/2 doesn't support chunked requests */
+ ;
else {
CURLcode result = Curl_add_bufferf(req_buffer, "%s\r\n",
headers->data);
@@ -1946,47 +1908,42 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
}
#endif
- if(conn->httpversion == 20)
- /* In HTTP2 forbids Transfer-Encoding: chunked */
- ptr = NULL;
+ ptr = Curl_checkheaders(conn, "Transfer-Encoding:");
+ if(ptr) {
+ /* Some kind of TE is requested, check if 'chunked' is chosen */
+ data->req.upload_chunky =
+ Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
+ }
else {
- ptr = Curl_checkheaders(conn, "Transfer-Encoding:");
- if(ptr) {
- /* Some kind of TE is requested, check if 'chunked' is chosen */
- data->req.upload_chunky =
- Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
- }
- else {
- if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
- data->set.upload &&
- (data->state.infilesize == -1)) {
- if(conn->bits.authneg)
- /* don't enable chunked during auth neg */
- ;
- else if(use_http_1_1plus(data, conn)) {
- /* HTTP, upload, unknown file size and not HTTP 1.0 */
- data->req.upload_chunky = TRUE;
- }
- else {
- failf(data, "Chunky upload is not supported by HTTP 1.0");
- return CURLE_UPLOAD_FAILED;
- }
+ if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
+ data->set.upload &&
+ (data->state.infilesize == -1)) {
+ if(conn->bits.authneg)
+ /* don't enable chunked during auth neg */
+ ;
+ else if(use_http_1_1plus(data, conn)) {
+ /* HTTP, upload, unknown file size and not HTTP 1.0 */
+ data->req.upload_chunky = TRUE;
}
else {
- /* else, no chunky upload */
- data->req.upload_chunky = FALSE;
+ failf(data, "Chunky upload is not supported by HTTP 1.0");
+ return CURLE_UPLOAD_FAILED;
}
-
- if(data->req.upload_chunky)
- te = "Transfer-Encoding: chunked\r\n";
}
+ else {
+ /* else, no chunky upload */
+ data->req.upload_chunky = FALSE;
+ }
+
+ if(data->req.upload_chunky)
+ te = "Transfer-Encoding: chunked\r\n";
}
Curl_safefree(conn->allocptr.host);
ptr = Curl_checkheaders(conn, "Host:");
if(ptr && (!data->state.this_is_a_follow ||
- Curl_raw_equal(data->state.first_host, conn->host.name))) {
+ strcasecompare(data->state.first_host, conn->host.name))) {
#if !defined(CURL_DISABLE_COOKIES)
/* If we have a given custom Host: header, we extract the host name in
order to possibly use it for cookie reasons later on. We only allow the
@@ -2305,6 +2262,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
"%s" /* TE: */
"%s" /* accept-encoding */
"%s" /* referer */
+ "%s" /* Proxy-Connection */
"%s",/* transfer-encoding */
ftp_typecode,
@@ -2327,6 +2285,10 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
conn->allocptr.accept_encoding:"",
(data->change.referer && conn->allocptr.ref)?
conn->allocptr.ref:"" /* Referer: <data> */,
+ (conn->bits.httpproxy &&
+ !conn->bits.tunnel_proxy &&
+ !Curl_checkProxyheaders(conn, "Proxy-Connection:"))?
+ "Proxy-Connection: Keep-Alive\r\n":"",
te
);
@@ -2392,7 +2354,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
}
co = co->next; /* next cookie please */
}
- Curl_cookie_freelist(store, FALSE); /* free the cookie list */
+ Curl_cookie_freelist(store);
}
if(addcookies && !result) {
if(!count)
@@ -2768,6 +2730,11 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
}
}
+ if((conn->httpversion == 20) && data->req.upload_chunky)
+ /* upload_chunky was set above to set up the request in a chunky fashion,
+ but is disabled here again to avoid that the chunked encoded version is
+ actually used when sending the request body over h2 */
+ data->req.upload_chunky = FALSE;
return result;
}
@@ -3040,19 +3007,19 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
#endif /* CURL_DOES_CONVERSIONS */
if(100 <= k->httpcode && 199 >= k->httpcode) {
- /*
- * We have made a HTTP PUT or POST and this is 1.1-lingo
- * that tells us that the server is OK with this and ready
- * to receive the data.
- * However, we'll get more headers now so we must get
- * back into the header-parsing state!
- */
- k->header = TRUE;
- k->headerline = 0; /* restart the header line counter */
-
/* "A user agent MAY ignore unexpected 1xx status responses." */
switch(k->httpcode) {
case 100:
+ /*
+ * We have made a HTTP PUT or POST and this is 1.1-lingo
+ * that tells us that the server is OK with this and ready
+ * to receive the data.
+ * However, we'll get more headers now so we must get
+ * back into the header-parsing state!
+ */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+
/* if we did wait for this do enable write now! */
if(k->exp100 > EXP100_SEND_DATA) {
k->exp100 = EXP100_SEND_DATA;
@@ -3062,9 +3029,14 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
case 101:
/* Switching Protocols */
if(k->upgr101 == UPGR101_REQUESTED) {
+ /* Switching to HTTP/2 */
infof(data, "Received 101\n");
k->upgr101 = UPGR101_RECEIVED;
+ /* we'll get more headers (HTTP/2 response) */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+
/* switch to http2 now. The bytes after response headers
are also processed here, otherwise they are lost. */
result = Curl_http2_switched(conn, k->str, *nread);
@@ -3072,8 +3044,16 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
return result;
*nread = 0;
}
+ else {
+ /* Switching to another protocol (e.g. WebSocket) */
+ k->header = FALSE; /* no more header to parse! */
+ }
break;
default:
+ /* the status code 1xx indicates a provisional response, so
+ we'll get another set of headers */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
break;
}
}
@@ -3091,7 +3071,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
signal the end of the document. */
infof(data, "no chunk, no close, no size. Assume close to "
"signal end\n");
- connclose(conn, "HTTP: No end-of-message indicator");
+ streamclose(conn, "HTTP: No end-of-message indicator");
}
}
@@ -3171,12 +3151,21 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
* connection for closure after we've read the entire response.
*/
if(!k->upload_done) {
- infof(data, "HTTP error before end of send, stop sending\n");
- connclose(conn, "Stop sending data before everything sent");
- k->upload_done = TRUE;
- k->keepon &= ~KEEP_SEND; /* don't send */
- if(data->state.expect100header)
- k->exp100 = EXP100_FAILED;
+ if(data->set.http_keep_sending_on_error) {
+ infof(data, "HTTP error before end of send, keep sending\n");
+ if(k->exp100 > EXP100_SEND_DATA) {
+ k->exp100 = EXP100_SEND_DATA;
+ k->keepon |= KEEP_SEND;
+ }
+ }
+ else {
+ infof(data, "HTTP error before end of send, stop sending\n");
+ streamclose(conn, "Stop sending data before everything sent");
+ k->upload_done = TRUE;
+ k->keepon &= ~KEEP_SEND; /* don't send */
+ if(data->state.expect100header)
+ k->exp100 = EXP100_FAILED;
+ }
}
break;
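
Note: the new branch checks data->set.http_keep_sending_on_error, the internal flag behind the public CURLOPT_KEEP_SENDING_ON_ERROR option (added in 7.51.0). When enabled, an early HTTP error response no longer aborts the request-body upload, so the connection can stay usable and the server's error body can still be read. A short usage sketch from the application side (URL and payload are placeholders):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    CURLcode res;
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/upload");
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "example=payload");
    /* keep sending the body even if an HTTP error (>= 300) arrives early */
    curl_easy_setopt(curl, CURLOPT_KEEP_SENDING_ON_ERROR, 1L);
    res = curl_easy_perform(curl);
    if(res != CURLE_OK)
      fprintf(stderr, "transfer failed: %s\n", curl_easy_strerror(res));
    curl_easy_cleanup(curl);
  }
  return 0;
}
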
@@ -3476,7 +3465,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
/* Negative Content-Length is really odd, and we know it
happens for example when older Apache servers send large
files */
- connclose(conn, "negative content-length");
+ streamclose(conn, "negative content-length");
infof(data, "Negative content-length: %" CURL_FORMAT_CURL_OFF_T
", closing after transfer\n", contentlength);
}
@@ -3549,7 +3538,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
* the connection will close when this request has been
* served.
*/
- connclose(conn, "Connection: close used");
+ streamclose(conn, "Connection: close used");
}
else if(checkprefix("Transfer-Encoding:", k->p)) {
/* One or more encodings. We check for chunked and/or a compression