Diffstat (limited to 'gcc-4.8.1/libgo/go/net/http')
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cgi/child.go  201
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cgi/child_test.go  109
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cgi/host.go  350
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cgi/host_test.go  467
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cgi/matryoshka_test.go  93
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cgi/testdata/test.cgi  90
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/chunked.go  183
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/chunked_test.go  93
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/client.go  391
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/client_test.go  678
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cookie.go  262
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/cookie_test.go  228
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/doc.go  80
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/example_test.go  56
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/export_test.go  50
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/fcgi/child.go  271
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/fcgi/fcgi.go  274
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/fcgi/fcgi_test.go  150
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/filetransport.go  123
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/filetransport_test.go  65
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/fs.go  525
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/fs_test.go  749
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/header.go  189
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/header_test.go  212
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httptest/recorder.go  72
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httptest/recorder_test.go  90
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httptest/server.go  216
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httptest/server_test.go  29
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httputil/chunked.go  185
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httputil/chunked_test.go  95
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httputil/dump.go  229
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httputil/dump_test.go  152
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httputil/persist.go  422
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy.go  188
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy_test.go  193
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/jar.go  25
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/lex.go  96
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/lex_test.go  31
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/pprof/pprof.go  205
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/proxy_test.go  78
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/range_test.go  79
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/readrequest_test.go  331
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/request.go  814
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/request_test.go  403
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/requestwrite_test.go  438
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/response.go  233
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/response_test.go  526
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/responsewrite_test.go  109
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/serve_test.go  1526
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/server.go  1537
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/server_test.go  95
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/sniff.go  214
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/sniff_test.go  138
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/status.go  108
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/testdata/file  1
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/testdata/index.html  1
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/testdata/style.css  1
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/transfer.go  656
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/transfer_test.go  37
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/transport.go  906
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/transport_test.go  1214
-rw-r--r--  gcc-4.8.1/libgo/go/net/http/triv.go  141
62 files changed, 0 insertions, 17703 deletions
diff --git a/gcc-4.8.1/libgo/go/net/http/cgi/child.go b/gcc-4.8.1/libgo/go/net/http/cgi/child.go
deleted file mode 100644
index 100b8b777..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cgi/child.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements CGI from the perspective of a child
-// process.
-
-package cgi
-
-import (
- "bufio"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "strings"
-)
-
-// Request returns the HTTP request as represented in the current
-// environment. This assumes the current program is being run
-// by a web server in a CGI environment.
-// The returned Request's Body is populated, if applicable.
-func Request() (*http.Request, error) {
- r, err := RequestFromMap(envMap(os.Environ()))
- if err != nil {
- return nil, err
- }
- if r.ContentLength > 0 {
- r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength))
- }
- return r, nil
-}
-
-func envMap(env []string) map[string]string {
- m := make(map[string]string)
- for _, kv := range env {
- if idx := strings.Index(kv, "="); idx != -1 {
- m[kv[:idx]] = kv[idx+1:]
- }
- }
- return m
-}
-
-// RequestFromMap creates an http.Request from CGI variables.
-// The returned Request's Body field is not populated.
-func RequestFromMap(params map[string]string) (*http.Request, error) {
- r := new(http.Request)
- r.Method = params["REQUEST_METHOD"]
- if r.Method == "" {
- return nil, errors.New("cgi: no REQUEST_METHOD in environment")
- }
-
- r.Proto = params["SERVER_PROTOCOL"]
- var ok bool
- r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto)
- if !ok {
- return nil, errors.New("cgi: invalid SERVER_PROTOCOL version")
- }
-
- r.Close = true
- r.Trailer = http.Header{}
- r.Header = http.Header{}
-
- r.Host = params["HTTP_HOST"]
-
- if lenstr := params["CONTENT_LENGTH"]; lenstr != "" {
- clen, err := strconv.ParseInt(lenstr, 10, 64)
- if err != nil {
- return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
- }
- r.ContentLength = clen
- }
-
- if ct := params["CONTENT_TYPE"]; ct != "" {
- r.Header.Set("Content-Type", ct)
- }
-
- // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers
- for k, v := range params {
- if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" {
- continue
- }
- r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v)
- }
-
- // TODO: cookies. parsing them isn't exported, though.
-
- uriStr := params["REQUEST_URI"]
- if uriStr == "" {
- // Fallback to SCRIPT_NAME, PATH_INFO and QUERY_STRING.
- uriStr = params["SCRIPT_NAME"] + params["PATH_INFO"]
- s := params["QUERY_STRING"]
- if s != "" {
- uriStr += "?" + s
- }
- }
- if r.Host != "" {
- // Hostname is provided, so we can reasonably construct a URL,
- // even if we have to assume 'http' for the scheme.
- rawurl := "http://" + r.Host + uriStr
- url, err := url.Parse(rawurl)
- if err != nil {
- return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl)
- }
- r.URL = url
- }
- // Fallback logic if we don't have a Host header or the URL
- // failed to parse
- if r.URL == nil {
- url, err := url.Parse(uriStr)
- if err != nil {
- return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr)
- }
- r.URL = url
- }
-
- // There's apparently a de-facto standard for this.
- // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
- if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" {
- r.TLS = &tls.ConnectionState{HandshakeComplete: true}
- }
-
- // Request.RemoteAddr has its port set by Go's standard http
- // server, so we do here too. We don't have one, though, so we
- // use a dummy one.
- r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0")
-
- return r, nil
-}
-
-// Serve executes the provided Handler on the currently active CGI
-// request, if any. If there's no current CGI environment
-// an error is returned. The provided handler may be nil to use
-// http.DefaultServeMux.
-func Serve(handler http.Handler) error {
- req, err := Request()
- if err != nil {
- return err
- }
- if handler == nil {
- handler = http.DefaultServeMux
- }
- rw := &response{
- req: req,
- header: make(http.Header),
- bufw: bufio.NewWriter(os.Stdout),
- }
- handler.ServeHTTP(rw, req)
- rw.Write(nil) // make sure a response is sent
- if err = rw.bufw.Flush(); err != nil {
- return err
- }
- return nil
-}
-
-type response struct {
- req *http.Request
- header http.Header
- bufw *bufio.Writer
- headerSent bool
-}
-
-func (r *response) Flush() {
- r.bufw.Flush()
-}
-
-func (r *response) Header() http.Header {
- return r.header
-}
-
-func (r *response) Write(p []byte) (n int, err error) {
- if !r.headerSent {
- r.WriteHeader(http.StatusOK)
- }
- return r.bufw.Write(p)
-}
-
-func (r *response) WriteHeader(code int) {
- if r.headerSent {
- // Note: explicitly using Stderr, as Stdout is our HTTP output.
- fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL)
- return
- }
- r.headerSent = true
- fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code))
-
- // Set a default Content-Type
- if _, hasType := r.header["Content-Type"]; !hasType {
- r.header.Add("Content-Type", "text/html; charset=utf-8")
- }
-
- r.header.Write(r.bufw)
- r.bufw.WriteString("\r\n")
- r.bufw.Flush()
-}
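
For reference, a minimal sketch of a CGI program written against the child-side API deleted above (cgi.Serve wrapping an ordinary http.Handler). The handler body and its output are illustrative only, not taken from these sources:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/cgi"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Headers must be set before the first Write; Write sends an implicit
		// 200 status if WriteHeader has not been called (see response.Write above).
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		fmt.Fprintf(w, "method=%s path=%s\n", r.Method, r.URL.Path)
	})
	if err := cgi.Serve(handler); err != nil {
		// Stdout carries the HTTP response, so report failures on stderr.
		log.Fatal(err)
	}
}

Stdout is the HTTP response stream in a CGI child, which is why the sketch logs errors rather than printing them.
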
diff --git a/gcc-4.8.1/libgo/go/net/http/cgi/child_test.go b/gcc-4.8.1/libgo/go/net/http/cgi/child_test.go
deleted file mode 100644
index 74e068014..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cgi/child_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for CGI (the child process perspective)
-
-package cgi
-
-import (
- "testing"
-)
-
-func TestRequest(t *testing.T) {
- env := map[string]string{
- "SERVER_PROTOCOL": "HTTP/1.1",
- "REQUEST_METHOD": "GET",
- "HTTP_HOST": "example.com",
- "HTTP_REFERER": "elsewhere",
- "HTTP_USER_AGENT": "goclient",
- "HTTP_FOO_BAR": "baz",
- "REQUEST_URI": "/path?a=b",
- "CONTENT_LENGTH": "123",
- "CONTENT_TYPE": "text/xml",
- "HTTPS": "1",
- "REMOTE_ADDR": "5.6.7.8",
- }
- req, err := RequestFromMap(env)
- if err != nil {
- t.Fatalf("RequestFromMap: %v", err)
- }
- if g, e := req.UserAgent(), "goclient"; e != g {
- t.Errorf("expected UserAgent %q; got %q", e, g)
- }
- if g, e := req.Method, "GET"; e != g {
- t.Errorf("expected Method %q; got %q", e, g)
- }
- if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g {
- t.Errorf("expected Content-Type %q; got %q", e, g)
- }
- if g, e := req.ContentLength, int64(123); e != g {
- t.Errorf("expected ContentLength %d; got %d", e, g)
- }
- if g, e := req.Referer(), "elsewhere"; e != g {
- t.Errorf("expected Referer %q; got %q", e, g)
- }
- if req.Header == nil {
- t.Fatalf("unexpected nil Header")
- }
- if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g {
- t.Errorf("expected Foo-Bar %q; got %q", e, g)
- }
- if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g {
- t.Errorf("expected URL %q; got %q", e, g)
- }
- if g, e := req.FormValue("a"), "b"; e != g {
- t.Errorf("expected FormValue(a) %q; got %q", e, g)
- }
- if req.Trailer == nil {
- t.Errorf("unexpected nil Trailer")
- }
- if req.TLS == nil {
- t.Errorf("expected non-nil TLS")
- }
- if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
- t.Errorf("RemoteAddr: got %q; want %q", g, e)
- }
-}
-
-func TestRequestWithoutHost(t *testing.T) {
- env := map[string]string{
- "SERVER_PROTOCOL": "HTTP/1.1",
- "HTTP_HOST": "",
- "REQUEST_METHOD": "GET",
- "REQUEST_URI": "/path?a=b",
- "CONTENT_LENGTH": "123",
- }
- req, err := RequestFromMap(env)
- if err != nil {
- t.Fatalf("RequestFromMap: %v", err)
- }
- if req.URL == nil {
- t.Fatalf("unexpected nil URL")
- }
- if g, e := req.URL.String(), "/path?a=b"; e != g {
- t.Errorf("URL = %q; want %q", g, e)
- }
-}
-
-func TestRequestWithoutRequestURI(t *testing.T) {
- env := map[string]string{
- "SERVER_PROTOCOL": "HTTP/1.1",
- "HTTP_HOST": "example.com",
- "REQUEST_METHOD": "GET",
- "SCRIPT_NAME": "/dir/scriptname",
- "PATH_INFO": "/p1/p2",
- "QUERY_STRING": "a=1&b=2",
- "CONTENT_LENGTH": "123",
- }
- req, err := RequestFromMap(env)
- if err != nil {
- t.Fatalf("RequestFromMap: %v", err)
- }
- if req.URL == nil {
- t.Fatalf("unexpected nil URL")
- }
- if g, e := req.URL.String(), "http://example.com/dir/scriptname/p1/p2?a=1&b=2"; e != g {
- t.Errorf("URL = %q; want %q", g, e)
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/cgi/host.go b/gcc-4.8.1/libgo/go/net/http/cgi/host.go
deleted file mode 100644
index d27cc4dc9..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cgi/host.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the host side of CGI (being the webserver
-// parent process).
-
-// Package cgi implements CGI (Common Gateway Interface) as specified
-// in RFC 3875.
-//
-// Note that using CGI means starting a new process to handle each
-// request, which is typically less efficient than using a
-// long-running server. This package is intended primarily for
-// compatibility with existing systems.
-package cgi
-
-import (
- "bufio"
- "fmt"
- "io"
- "log"
- "net/http"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "runtime"
- "strconv"
- "strings"
-)
-
-var trailingPort = regexp.MustCompile(`:([0-9]+)$`)
-
-var osDefaultInheritEnv = map[string][]string{
- "darwin": {"DYLD_LIBRARY_PATH"},
- "freebsd": {"LD_LIBRARY_PATH"},
- "hpux": {"LD_LIBRARY_PATH", "SHLIB_PATH"},
- "irix": {"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"},
- "linux": {"LD_LIBRARY_PATH"},
- "openbsd": {"LD_LIBRARY_PATH"},
- "solaris": {"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"},
- "windows": {"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"},
-}
-
-// Handler runs an executable in a subprocess with a CGI environment.
-type Handler struct {
- Path string // path to the CGI executable
- Root string // root URI prefix of handler or empty for "/"
-
- // Dir specifies the CGI executable's working directory.
- // If Dir is empty, the base directory of Path is used.
- // If Path has no base directory, the current working
- // directory is used.
- Dir string
-
- Env []string // extra environment variables to set, if any, as "key=value"
- InheritEnv []string // environment variables to inherit from host, as "key"
- Logger *log.Logger // optional log for errors or nil to use log.Print
- Args []string // optional arguments to pass to child process
-
- // PathLocationHandler specifies the root http Handler that
- // should handle internal redirects when the CGI process
- // returns a Location header value starting with a "/", as
- // specified in RFC 3875 § 6.3.2. This will likely be
- // http.DefaultServeMux.
- //
- // If nil, a CGI response with a local URI path is instead sent
- // back to the client and not redirected internally.
- PathLocationHandler http.Handler
-}
-
-// removeLeadingDuplicates removes leading duplicates from the environment,
-// which makes it possible to override environment variables like the following:
-// cgi.Handler{
-// ...
-// Env: []string{"SCRIPT_FILENAME=foo.php"},
-// }
-func removeLeadingDuplicates(env []string) (ret []string) {
- n := len(env)
- for i := 0; i < n; i++ {
- e := env[i]
- s := strings.SplitN(e, "=", 2)[0]
- found := false
- for j := i + 1; j < n; j++ {
- if s == strings.SplitN(env[j], "=", 2)[0] {
- found = true
- break
- }
- }
- if !found {
- ret = append(ret, e)
- }
- }
- return
-}
-
-func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- root := h.Root
- if root == "" {
- root = "/"
- }
-
- if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" {
- rw.WriteHeader(http.StatusBadRequest)
- rw.Write([]byte("Chunked request bodies are not supported by CGI."))
- return
- }
-
- pathInfo := req.URL.Path
- if root != "/" && strings.HasPrefix(pathInfo, root) {
- pathInfo = pathInfo[len(root):]
- }
-
- port := "80"
- if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {
- port = matches[1]
- }
-
- env := []string{
- "SERVER_SOFTWARE=go",
- "SERVER_NAME=" + req.Host,
- "SERVER_PROTOCOL=HTTP/1.1",
- "HTTP_HOST=" + req.Host,
- "GATEWAY_INTERFACE=CGI/1.1",
- "REQUEST_METHOD=" + req.Method,
- "QUERY_STRING=" + req.URL.RawQuery,
- "REQUEST_URI=" + req.URL.RequestURI(),
- "PATH_INFO=" + pathInfo,
- "SCRIPT_NAME=" + root,
- "SCRIPT_FILENAME=" + h.Path,
- "REMOTE_ADDR=" + req.RemoteAddr,
- "REMOTE_HOST=" + req.RemoteAddr,
- "SERVER_PORT=" + port,
- }
-
- if req.TLS != nil {
- env = append(env, "HTTPS=on")
- }
-
- for k, v := range req.Header {
- k = strings.Map(upperCaseAndUnderscore, k)
- joinStr := ", "
- if k == "COOKIE" {
- joinStr = "; "
- }
- env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr))
- }
-
- if req.ContentLength > 0 {
- env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength))
- }
- if ctype := req.Header.Get("Content-Type"); ctype != "" {
- env = append(env, "CONTENT_TYPE="+ctype)
- }
-
- if h.Env != nil {
- env = append(env, h.Env...)
- }
-
- envPath := os.Getenv("PATH")
- if envPath == "" {
- envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin"
- }
- env = append(env, "PATH="+envPath)
-
- for _, e := range h.InheritEnv {
- if v := os.Getenv(e); v != "" {
- env = append(env, e+"="+v)
- }
- }
-
- for _, e := range osDefaultInheritEnv[runtime.GOOS] {
- if v := os.Getenv(e); v != "" {
- env = append(env, e+"="+v)
- }
- }
-
- env = removeLeadingDuplicates(env)
-
- var cwd, path string
- if h.Dir != "" {
- path = h.Path
- cwd = h.Dir
- } else {
- cwd, path = filepath.Split(h.Path)
- }
- if cwd == "" {
- cwd = "."
- }
-
- internalError := func(err error) {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("CGI error: %v", err)
- }
-
- cmd := &exec.Cmd{
- Path: path,
- Args: append([]string{h.Path}, h.Args...),
- Dir: cwd,
- Env: env,
- Stderr: os.Stderr, // for now
- }
- if req.ContentLength != 0 {
- cmd.Stdin = req.Body
- }
- stdoutRead, err := cmd.StdoutPipe()
- if err != nil {
- internalError(err)
- return
- }
-
- err = cmd.Start()
- if err != nil {
- internalError(err)
- return
- }
- defer cmd.Wait()
- defer stdoutRead.Close()
-
- linebody := bufio.NewReaderSize(stdoutRead, 1024)
- headers := make(http.Header)
- statusCode := 0
- for {
- line, isPrefix, err := linebody.ReadLine()
- if isPrefix {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("cgi: long header line from subprocess.")
- return
- }
- if err == io.EOF {
- break
- }
- if err != nil {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("cgi: error reading headers: %v", err)
- return
- }
- if len(line) == 0 {
- break
- }
- parts := strings.SplitN(string(line), ":", 2)
- if len(parts) < 2 {
- h.printf("cgi: bogus header line: %s", string(line))
- continue
- }
- header, val := parts[0], parts[1]
- header = strings.TrimSpace(header)
- val = strings.TrimSpace(val)
- switch {
- case header == "Status":
- if len(val) < 3 {
- h.printf("cgi: bogus status (short): %q", val)
- return
- }
- code, err := strconv.Atoi(val[0:3])
- if err != nil {
- h.printf("cgi: bogus status: %q", val)
- h.printf("cgi: line was %q", line)
- return
- }
- statusCode = code
- default:
- headers.Add(header, val)
- }
- }
-
- if loc := headers.Get("Location"); loc != "" {
- if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil {
- h.handleInternalRedirect(rw, req, loc)
- return
- }
- if statusCode == 0 {
- statusCode = http.StatusFound
- }
- }
-
- if statusCode == 0 {
- statusCode = http.StatusOK
- }
-
- // Copy headers to rw's headers, after we've decided not to
- // go into handleInternalRedirect, which won't want its rw
- // headers to have been touched.
- for k, vv := range headers {
- for _, v := range vv {
- rw.Header().Add(k, v)
- }
- }
-
- rw.WriteHeader(statusCode)
-
- _, err = io.Copy(rw, linebody)
- if err != nil {
- h.printf("cgi: copy error: %v", err)
- }
-}
-
-func (h *Handler) printf(format string, v ...interface{}) {
- if h.Logger != nil {
- h.Logger.Printf(format, v...)
- } else {
- log.Printf(format, v...)
- }
-}
-
-func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) {
- url, err := req.URL.Parse(path)
- if err != nil {
- rw.WriteHeader(http.StatusInternalServerError)
- h.printf("cgi: error resolving local URI path %q: %v", path, err)
- return
- }
- // TODO: RFC 3875 isn't clear if only GET is supported, but it
- // suggests so: "Note that any message-body attached to the
- // request (such as for a POST request) may not be available
- // to the resource that is the target of the redirect." We
- // should do some tests against Apache to see how it handles
- // POST, HEAD, etc. Does the internal redirect get the same
- // method or just GET? What about incoming headers?
- // (e.g. Cookies) Which headers, if any, are copied into the
- // second request?
- newReq := &http.Request{
- Method: "GET",
- URL: url,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(http.Header),
- Host: url.Host,
- RemoteAddr: req.RemoteAddr,
- TLS: req.TLS,
- }
- h.PathLocationHandler.ServeHTTP(rw, newReq)
-}
-
-func upperCaseAndUnderscore(r rune) rune {
- switch {
- case r >= 'a' && r <= 'z':
- return r - ('a' - 'A')
- case r == '-':
- return '_'
- case r == '=':
- // Maybe not part of the CGI 'spec' but would mess up
- // the environment in any case, as Go represents the
- // environment as a slice of "key=value" strings.
- return '_'
- }
- // TODO: other transformations in spec or practice?
- return r
-}
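
For context, a minimal sketch of mounting the host-side Handler deleted above in a Go server. The script path, URL prefix, and listen address are hypothetical:

package main

import (
	"log"
	"net/http"
	"net/http/cgi"
)

func main() {
	h := &cgi.Handler{
		// Hypothetical script path and URL prefix, used only to illustrate
		// the fields documented above.
		Path: "/var/www/cgi-bin/test.cgi",
		Root: "/cgi-bin/test.cgi",
		// Internal redirects (a Location header starting with "/") are
		// re-dispatched to this handler, per RFC 3875 § 6.3.2.
		PathLocationHandler: http.DefaultServeMux,
	}
	http.Handle("/cgi-bin/test.cgi", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}

Each matching request starts the executable with the CGI environment built in ServeHTTP above; leaving Dir empty runs it from the script's base directory.
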
diff --git a/gcc-4.8.1/libgo/go/net/http/cgi/host_test.go b/gcc-4.8.1/libgo/go/net/http/cgi/host_test.go
deleted file mode 100644
index cb6f1df1f..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cgi/host_test.go
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for package cgi
-
-package cgi
-
-import (
- "bufio"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/http/httptest"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "syscall"
- "testing"
- "time"
-)
-
-func newRequest(httpreq string) *http.Request {
- buf := bufio.NewReader(strings.NewReader(httpreq))
- req, err := http.ReadRequest(buf)
- if err != nil {
- panic("cgi: bogus http request in test: " + httpreq)
- }
- req.RemoteAddr = "1.2.3.4"
- return req
-}
-
-func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder {
- rw := httptest.NewRecorder()
- req := newRequest(httpreq)
- h.ServeHTTP(rw, req)
-
- // Make a map to hold the test map that the CGI returns.
- m := make(map[string]string)
- m["_body"] = rw.Body.String()
- linesRead := 0
-readlines:
- for {
- line, err := rw.Body.ReadString('\n')
- switch {
- case err == io.EOF:
- break readlines
- case err != nil:
- t.Fatalf("unexpected error reading from CGI: %v", err)
- }
- linesRead++
- trimmedLine := strings.TrimRight(line, "\r\n")
- split := strings.SplitN(trimmedLine, "=", 2)
- if len(split) != 2 {
- t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v",
- len(split), linesRead, line, m)
- }
- m[split[0]] = split[1]
- }
-
- for key, expected := range expectedMap {
- got := m[key]
- if key == "cwd" {
- // For Windows. golang.org/issue/4645.
- fi1, _ := os.Stat(got)
- fi2, _ := os.Stat(expected)
- if os.SameFile(fi1, fi2) {
- got = expected
- }
- }
- if got != expected {
- t.Errorf("for key %q got %q; expected %q", key, got, expected)
- }
- }
- return rw
-}
-
-var cgiTested, cgiWorks bool
-
-func check(t *testing.T) {
- if !cgiTested {
- cgiTested = true
- cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil
- }
- if !cgiWorks {
- // No Perl on Windows, needed by test.cgi
- // TODO: make the child process be Go, not Perl.
- t.Skip("Skipping test: test.cgi failed.")
- }
-}
-
-func TestCGIBasicGet(t *testing.T) {
- check(t)
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "test": "Hello CGI",
- "param-a": "b",
- "param-foo": "bar",
- "env-GATEWAY_INTERFACE": "CGI/1.1",
- "env-HTTP_HOST": "example.com",
- "env-PATH_INFO": "",
- "env-QUERY_STRING": "foo=bar&a=b",
- "env-REMOTE_ADDR": "1.2.3.4",
- "env-REMOTE_HOST": "1.2.3.4",
- "env-REQUEST_METHOD": "GET",
- "env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/test.cgi",
- "env-SERVER_NAME": "example.com",
- "env-SERVER_PORT": "80",
- "env-SERVER_SOFTWARE": "go",
- }
- replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected {
- t.Errorf("got a Content-Type of %q; expected %q", got, expected)
- }
- if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
- t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
- }
-}
-
-func TestCGIBasicGetAbsPath(t *testing.T) {
- check(t)
- pwd, err := os.Getwd()
- if err != nil {
- t.Fatalf("getwd error: %v", err)
- }
- h := &Handler{
- Path: pwd + "/testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
- "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi",
- "env-SCRIPT_NAME": "/test.cgi",
- }
- runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestPathInfo(t *testing.T) {
- check(t)
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "param-a": "b",
- "env-PATH_INFO": "/extrapath",
- "env-QUERY_STRING": "a=b",
- "env-REQUEST_URI": "/test.cgi/extrapath?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/test.cgi",
- }
- runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestPathInfoDirRoot(t *testing.T) {
- check(t)
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/myscript/",
- }
- expectedMap := map[string]string{
- "env-PATH_INFO": "bar",
- "env-QUERY_STRING": "a=b",
- "env-REQUEST_URI": "/myscript/bar?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/myscript/",
- }
- runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestDupHeaders(t *testing.T) {
- check(t)
- h := &Handler{
- Path: "testdata/test.cgi",
- }
- expectedMap := map[string]string{
- "env-REQUEST_URI": "/myscript/bar?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-HTTP_COOKIE": "nom=NOM; yum=YUM",
- "env-HTTP_X_FOO": "val1, val2",
- }
- runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+
- "Cookie: nom=NOM\n"+
- "Cookie: yum=YUM\n"+
- "X-Foo: val1\n"+
- "X-Foo: val2\n"+
- "Host: example.com\n\n",
- expectedMap)
-}
-
-func TestPathInfoNoRoot(t *testing.T) {
- check(t)
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "",
- }
- expectedMap := map[string]string{
- "env-PATH_INFO": "/bar",
- "env-QUERY_STRING": "a=b",
- "env-REQUEST_URI": "/bar?a=b",
- "env-SCRIPT_FILENAME": "testdata/test.cgi",
- "env-SCRIPT_NAME": "/",
- }
- runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestCGIBasicPost(t *testing.T) {
- check(t)
- postReq := `POST /test.cgi?a=b HTTP/1.0
-Host: example.com
-Content-Type: application/x-www-form-urlencoded
-Content-Length: 15
-
-postfoo=postbar`
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{
- "test": "Hello CGI",
- "param-postfoo": "postbar",
- "env-REQUEST_METHOD": "POST",
- "env-CONTENT_LENGTH": "15",
- "env-REQUEST_URI": "/test.cgi?a=b",
- }
- runCgiTest(t, h, postReq, expectedMap)
-}
-
-func chunk(s string) string {
- return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
-}
-
-// The CGI spec doesn't allow chunked requests.
-func TestCGIPostChunked(t *testing.T) {
- check(t)
- postReq := `POST /test.cgi?a=b HTTP/1.1
-Host: example.com
-Content-Type: application/x-www-form-urlencoded
-Transfer-Encoding: chunked
-
-` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("")
-
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- expectedMap := map[string]string{}
- resp := runCgiTest(t, h, postReq, expectedMap)
- if got, expected := resp.Code, http.StatusBadRequest; got != expected {
- t.Fatalf("Expected %v response code from chunked request body; got %d",
- expected, got)
- }
-}
-
-func TestRedirect(t *testing.T) {
- check(t)
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil)
- if e, g := 302, rec.Code; e != g {
- t.Errorf("expected status code %d; got %d", e, g)
- }
- if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g {
- t.Errorf("expected Location header of %q; got %q", e, g)
- }
-}
-
-func TestInternalRedirect(t *testing.T) {
- check(t)
- baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
- fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path)
- fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr)
- })
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- PathLocationHandler: baseHandler,
- }
- expectedMap := map[string]string{
- "basepath": "/foo",
- "remoteaddr": "1.2.3.4",
- }
- runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-// TestCopyError tests that we kill the process if there's an error copying
-// its output. (for example, from the client having gone away)
-func TestCopyError(t *testing.T) {
- check(t)
- if runtime.GOOS == "windows" {
- t.Skipf("skipping test on %q", runtime.GOOS)
- }
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- ts := httptest.NewServer(h)
- defer ts.Close()
-
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil)
- err = req.Write(conn)
- if err != nil {
- t.Fatalf("Write: %v", err)
- }
-
- res, err := http.ReadResponse(bufio.NewReader(conn), req)
- if err != nil {
- t.Fatalf("ReadResponse: %v", err)
- }
-
- pidstr := res.Header.Get("X-CGI-Pid")
- if pidstr == "" {
- t.Fatalf("expected an X-CGI-Pid header in response")
- }
- pid, err := strconv.Atoi(pidstr)
- if err != nil {
- t.Fatalf("invalid X-CGI-Pid value")
- }
-
- var buf [5000]byte
- n, err := io.ReadFull(res.Body, buf[:])
- if err != nil {
- t.Fatalf("ReadFull: %d bytes, %v", n, err)
- }
-
- childRunning := func() bool {
- p, err := os.FindProcess(pid)
- if err != nil {
- return false
- }
- return p.Signal(syscall.Signal(0)) == nil
- }
-
- if !childRunning() {
- t.Fatalf("pre-conn.Close, expected child to be running")
- }
- conn.Close()
-
- tries := 0
- for tries < 25 && childRunning() {
- time.Sleep(50 * time.Millisecond * time.Duration(tries))
- tries++
- }
- if childRunning() {
- t.Fatalf("post-conn.Close, expected child to be gone")
- }
-}
-
-func TestDirUnix(t *testing.T) {
- check(t)
- if runtime.GOOS == "windows" {
- t.Skipf("skipping test on %q", runtime.GOOS)
- }
- cwd, _ := os.Getwd()
- h := &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- Dir: cwd,
- }
- expectedMap := map[string]string{
- "cwd": cwd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- cwd, _ = os.Getwd()
- cwd = filepath.Join(cwd, "testdata")
- h = &Handler{
- Path: "testdata/test.cgi",
- Root: "/test.cgi",
- }
- abswd, _ := filepath.EvalSymlinks(cwd)
- expectedMap = map[string]string{
- "cwd": abswd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestDirWindows(t *testing.T) {
- if runtime.GOOS != "windows" {
- t.Skip("Skipping windows specific test.")
- }
-
- cgifile, _ := filepath.Abs("testdata/test.cgi")
-
- var perl string
- var err error
- perl, err = exec.LookPath("perl")
- if err != nil {
- t.Skip("Skipping test: perl not found.")
- }
- perl, _ = filepath.Abs(perl)
-
- cwd, _ := os.Getwd()
- h := &Handler{
- Path: perl,
- Root: "/test.cgi",
- Dir: cwd,
- Args: []string{cgifile},
- Env: []string{"SCRIPT_FILENAME=" + cgifile},
- }
- expectedMap := map[string]string{
- "cwd": cwd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- // If Dir is not specified on Windows, the working directory
- // should be the base directory of perl.
- cwd, _ = filepath.Split(perl)
- if cwd != "" && cwd[len(cwd)-1] == filepath.Separator {
- cwd = cwd[:len(cwd)-1]
- }
- h = &Handler{
- Path: perl,
- Root: "/test.cgi",
- Args: []string{cgifile},
- Env: []string{"SCRIPT_FILENAME=" + cgifile},
- }
- expectedMap = map[string]string{
- "cwd": cwd,
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
-func TestEnvOverride(t *testing.T) {
- cgifile, _ := filepath.Abs("testdata/test.cgi")
-
- var perl string
- var err error
- perl, err = exec.LookPath("perl")
- if err != nil {
- t.Skipf("Skipping test: perl not found.")
- }
- perl, _ = filepath.Abs(perl)
-
- cwd, _ := os.Getwd()
- h := &Handler{
- Path: perl,
- Root: "/test.cgi",
- Dir: cwd,
- Args: []string{cgifile},
- Env: []string{
- "SCRIPT_FILENAME=" + cgifile,
- "REQUEST_URI=/foo/bar"},
- }
- expectedMap := map[string]string{
- "cwd": cwd,
- "env-SCRIPT_FILENAME": cgifile,
- "env-REQUEST_URI": "/foo/bar",
- }
- runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/cgi/matryoshka_test.go b/gcc-4.8.1/libgo/go/net/http/cgi/matryoshka_test.go
deleted file mode 100644
index e1a78c8f6..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cgi/matryoshka_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests a Go CGI program running under a Go CGI host process.
-// Further, the two programs are the same binary, just checking
-// their environment to figure out what mode to run in.
-
-package cgi
-
-import (
- "fmt"
- "net/http"
- "os"
- "testing"
-)
-
-// This test is a CGI host (testing host.go) that runs its own binary
-// as a child process testing the other half of CGI (child.go).
-func TestHostingOurselves(t *testing.T) {
- h := &Handler{
- Path: os.Args[0],
- Root: "/test.go",
- Args: []string{"-test.run=TestBeChildCGIProcess"},
- }
- expectedMap := map[string]string{
- "test": "Hello CGI-in-CGI",
- "param-a": "b",
- "param-foo": "bar",
- "env-GATEWAY_INTERFACE": "CGI/1.1",
- "env-HTTP_HOST": "example.com",
- "env-PATH_INFO": "",
- "env-QUERY_STRING": "foo=bar&a=b",
- "env-REMOTE_ADDR": "1.2.3.4",
- "env-REMOTE_HOST": "1.2.3.4",
- "env-REQUEST_METHOD": "GET",
- "env-REQUEST_URI": "/test.go?foo=bar&a=b",
- "env-SCRIPT_FILENAME": os.Args[0],
- "env-SCRIPT_NAME": "/test.go",
- "env-SERVER_NAME": "example.com",
- "env-SERVER_PORT": "80",
- "env-SERVER_SOFTWARE": "go",
- }
- replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
- if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
- t.Errorf("got a Content-Type of %q; expected %q", got, expected)
- }
- if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
- t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
- }
-}
-
-// Test that a child handler only writing headers works.
-func TestChildOnlyHeaders(t *testing.T) {
- h := &Handler{
- Path: os.Args[0],
- Root: "/test.go",
- Args: []string{"-test.run=TestBeChildCGIProcess"},
- }
- expectedMap := map[string]string{
- "_body": "",
- }
- replay := runCgiTest(t, h, "GET /test.go?no-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap)
- if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
- t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
- }
-}
-
-// Note: not actually a test.
-func TestBeChildCGIProcess(t *testing.T) {
- if os.Getenv("REQUEST_METHOD") == "" {
- // Not in a CGI environment; skipping test.
- return
- }
- Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
- rw.Header().Set("X-Test-Header", "X-Test-Value")
- req.ParseForm()
- if req.FormValue("no-body") == "1" {
- return
- }
- fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
- for k, vv := range req.Form {
- for _, v := range vv {
- fmt.Fprintf(rw, "param-%s=%s\n", k, v)
- }
- }
- for _, kv := range os.Environ() {
- fmt.Fprintf(rw, "env-%s\n", kv)
- }
- }))
- os.Exit(0)
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/cgi/testdata/test.cgi b/gcc-4.8.1/libgo/go/net/http/cgi/testdata/test.cgi
deleted file mode 100644
index 1b25bc299..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cgi/testdata/test.cgi
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/perl
-# Copyright 2011 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-#
-# Test script run as a child process under cgi_test.go
-
-use strict;
-use Cwd;
-
-binmode STDOUT;
-
-my $q = MiniCGI->new;
-my $params = $q->Vars;
-
-if ($params->{"loc"}) {
- print "Location: $params->{loc}\r\n\r\n";
- exit(0);
-}
-
-print "Content-Type: text/html\r\n";
-print "X-CGI-Pid: $$\r\n";
-print "X-Test-Header: X-Test-Value\r\n";
-print "\r\n";
-
-if ($params->{"bigresponse"}) {
- for (1..1024) {
- print "A" x 1024, "\r\n";
- }
- exit 0;
-}
-
-print "test=Hello CGI\r\n";
-
-foreach my $k (sort keys %$params) {
- print "param-$k=$params->{$k}\r\n";
-}
-
-foreach my $k (sort keys %ENV) {
- my $clean_env = $ENV{$k};
- $clean_env =~ s/[\n\r]//g;
- print "env-$k=$clean_env\r\n";
-}
-
-# NOTE: msys perl returns /c/go/src/... not C:\go\....
-my $dir = getcwd();
-if ($^O eq 'MSWin32' || $^O eq 'msys') {
- if ($dir =~ /^.:/) {
- $dir =~ s!/!\\!g;
- } else {
- my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
- $cmd =~ s!\\!/!g;
- $dir = `$cmd /c cd`;
- chomp $dir;
- }
-}
-print "cwd=$dir\r\n";
-
-# A minimal version of CGI.pm, for people without the perl-modules
-# package installed. (CGI.pm used to be part of the Perl core, but
-# some distros now bundle perl-base and perl-modules separately...)
-package MiniCGI;
-
-sub new {
- my $class = shift;
- return bless {}, $class;
-}
-
-sub Vars {
- my $self = shift;
- my $pairs;
- if ($ENV{CONTENT_LENGTH}) {
- $pairs = do { local $/; <STDIN> };
- } else {
- $pairs = $ENV{QUERY_STRING};
- }
- my $vars = {};
- foreach my $kv (split(/&/, $pairs)) {
- my ($k, $v) = split(/=/, $kv, 2);
- $vars->{_urldecode($k)} = _urldecode($v);
- }
- return $vars;
-}
-
-sub _urldecode {
- my $v = shift;
- $v =~ tr/+/ /;
- $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
- return $v;
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/chunked.go b/gcc-4.8.1/libgo/go/net/http/chunked.go
deleted file mode 100644
index 91db01724..000000000
--- a/gcc-4.8.1/libgo/go/net/http/chunked.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The wire protocol for HTTP's "chunked" Transfer-Encoding.
-
-// This code is duplicated in httputil/chunked.go.
-// Please make any changes in both files.
-
-package http
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
-)
-
-const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
-
-var ErrLineTooLong = errors.New("header line too long")
-
-// newChunkedReader returns a new chunkedReader that translates the data read from r
-// out of HTTP "chunked" format before returning it.
-// The chunkedReader returns io.EOF when the final 0-length chunk is read.
-//
-// newChunkedReader is not needed by normal applications. The http package
-// automatically decodes chunking when reading response bodies.
-func newChunkedReader(r io.Reader) io.Reader {
- br, ok := r.(*bufio.Reader)
- if !ok {
- br = bufio.NewReader(r)
- }
- return &chunkedReader{r: br}
-}
-
-type chunkedReader struct {
- r *bufio.Reader
- n uint64 // unread bytes in chunk
- err error
- buf [2]byte
-}
-
-func (cr *chunkedReader) beginChunk() {
- // chunk-size CRLF
- var line []byte
- line, cr.err = readLine(cr.r)
- if cr.err != nil {
- return
- }
- cr.n, cr.err = parseHexUint(line)
- if cr.err != nil {
- return
- }
- if cr.n == 0 {
- cr.err = io.EOF
- }
-}
-
-func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
- if cr.err != nil {
- return 0, cr.err
- }
- if cr.n == 0 {
- cr.beginChunk()
- if cr.err != nil {
- return 0, cr.err
- }
- }
- if uint64(len(b)) > cr.n {
- b = b[0:cr.n]
- }
- n, cr.err = cr.r.Read(b)
- cr.n -= uint64(n)
- if cr.n == 0 && cr.err == nil {
- // end of chunk (CRLF)
- if _, cr.err = io.ReadFull(cr.r, cr.buf[:]); cr.err == nil {
- if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
- cr.err = errors.New("malformed chunked encoding")
- }
- }
- }
- return n, cr.err
-}
-
-// Read a line of bytes (up to \n) from b.
-// Give up if the line exceeds maxLineLength.
-// The returned bytes are a pointer into storage in
-// the bufio, so they are only valid until the next bufio read.
-func readLine(b *bufio.Reader) (p []byte, err error) {
- if p, err = b.ReadSlice('\n'); err != nil {
- // We always know when EOF is coming.
- // If the caller asked for a line, there should be a line.
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- } else if err == bufio.ErrBufferFull {
- err = ErrLineTooLong
- }
- return nil, err
- }
- if len(p) >= maxLineLength {
- return nil, ErrLineTooLong
- }
- return trimTrailingWhitespace(p), nil
-}
-
-func trimTrailingWhitespace(b []byte) []byte {
- for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
- b = b[:len(b)-1]
- }
- return b
-}
-
-func isASCIISpace(b byte) bool {
- return b == ' ' || b == '\t' || b == '\n' || b == '\r'
-}
-
-// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned chunkedWriter
-// sends the final 0-length chunk that marks the end of the stream.
-//
-// newChunkedWriter is not needed by normal applications. The http
-// package adds chunking automatically if handlers don't set a
-// Content-Length header. Using newChunkedWriter inside a handler
-// would result in double chunking or chunking with a Content-Length
-// length, both of which are wrong.
-func newChunkedWriter(w io.Writer) io.WriteCloser {
- return &chunkedWriter{w}
-}
-
-// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
-// Encoding wire format to the underlying Wire chunkedWriter.
-type chunkedWriter struct {
- Wire io.Writer
-}
-
-// Write the contents of data as one chunk to Wire.
-// NOTE: the corresponding chunk-writing procedure in Conn.Write has
-// a bug since it does not check for success of io.WriteString
-func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
-
- // Don't send 0-length data. It looks like EOF for chunked encoding.
- if len(data) == 0 {
- return 0, nil
- }
-
- if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
- return 0, err
- }
- if n, err = cw.Wire.Write(data); err != nil {
- return
- }
- if n != len(data) {
- err = io.ErrShortWrite
- return
- }
- _, err = io.WriteString(cw.Wire, "\r\n")
-
- return
-}
-
-func (cw *chunkedWriter) Close() error {
- _, err := io.WriteString(cw.Wire, "0\r\n")
- return err
-}
-
-func parseHexUint(v []byte) (n uint64, err error) {
- for _, b := range v {
- n <<= 4
- switch {
- case '0' <= b && b <= '9':
- b = b - '0'
- case 'a' <= b && b <= 'f':
- b = b - 'a' + 10
- case 'A' <= b && b <= 'F':
- b = b - 'A' + 10
- default:
- return 0, errors.New("invalid byte in chunk length")
- }
- n |= uint64(b)
- }
- return
-}
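
As the header comment notes, this file is duplicated in httputil/chunked.go, where the reader and writer are exported as NewChunkedReader and NewChunkedWriter. A small sketch of that exported API round-tripping an arbitrary payload through an in-memory buffer:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http/httputil"
)

func main() {
	var buf bytes.Buffer

	// Encode two writes as HTTP/1.1 chunks; Close emits the final 0-length chunk.
	w := httputil.NewChunkedWriter(&buf)
	w.Write([]byte("hello, "))
	w.Write([]byte("world"))
	w.Close()
	fmt.Printf("wire format: %q\n", buf.String())

	// Decode the chunked stream back into the original bytes.
	r := httputil.NewChunkedReader(&buf)
	body, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("decoded: %q\n", body)
}
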
diff --git a/gcc-4.8.1/libgo/go/net/http/chunked_test.go b/gcc-4.8.1/libgo/go/net/http/chunked_test.go
deleted file mode 100644
index 0b18c7b55..000000000
--- a/gcc-4.8.1/libgo/go/net/http/chunked_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code is duplicated in httputil/chunked_test.go.
-// Please make any changes in both files.
-
-package http
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "runtime"
- "testing"
-)
-
-func TestChunk(t *testing.T) {
- var b bytes.Buffer
-
- w := newChunkedWriter(&b)
- const chunk1 = "hello, "
- const chunk2 = "world! 0123456789abcdef"
- w.Write([]byte(chunk1))
- w.Write([]byte(chunk2))
- w.Close()
-
- if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
- t.Fatalf("chunk writer wrote %q; want %q", g, e)
- }
-
- r := newChunkedReader(&b)
- data, err := ioutil.ReadAll(r)
- if err != nil {
- t.Logf(`data: "%s"`, data)
- t.Fatalf("ReadAll from reader: %v", err)
- }
- if g, e := string(data), chunk1+chunk2; g != e {
- t.Errorf("chunk reader read %q; want %q", g, e)
- }
-}
-
-func TestChunkReaderAllocs(t *testing.T) {
- // temporarily set GOMAXPROCS to 1 as we are testing memory allocations
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
- var buf bytes.Buffer
- w := newChunkedWriter(&buf)
- a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
- w.Write(a)
- w.Write(b)
- w.Write(c)
- w.Close()
-
- r := newChunkedReader(&buf)
- readBuf := make([]byte, len(a)+len(b)+len(c)+1)
-
- var ms runtime.MemStats
- runtime.ReadMemStats(&ms)
- m0 := ms.Mallocs
-
- n, err := io.ReadFull(r, readBuf)
-
- runtime.ReadMemStats(&ms)
- mallocs := ms.Mallocs - m0
- if mallocs > 1 {
- t.Errorf("%d mallocs; want <= 1", mallocs)
- }
-
- if n != len(readBuf)-1 {
- t.Errorf("read %d bytes; want %d", n, len(readBuf)-1)
- }
- if err != io.ErrUnexpectedEOF {
- t.Errorf("read error = %v; want ErrUnexpectedEOF", err)
- }
-}
-
-func TestParseHexUint(t *testing.T) {
- for i := uint64(0); i <= 1234; i++ {
- line := []byte(fmt.Sprintf("%x", i))
- got, err := parseHexUint(line)
- if err != nil {
- t.Fatalf("on %d: %v", i, err)
- }
- if got != i {
- t.Errorf("for input %q = %d; want %d", line, got, i)
- }
- }
- _, err := parseHexUint([]byte("bogus"))
- if err == nil {
- t.Error("expected error on bogus input")
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/client.go b/gcc-4.8.1/libgo/go/net/http/client.go
deleted file mode 100644
index 5ee0804c7..000000000
--- a/gcc-4.8.1/libgo/go/net/http/client.go
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP client. See RFC 2616.
-//
-// This is the high-level Client interface.
-// The low-level implementation is in transport.go.
-
-package http
-
-import (
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "log"
- "net/url"
- "strings"
-)
-
-// A Client is an HTTP client. Its zero value (DefaultClient) is a usable client
-// that uses DefaultTransport.
-//
-// The Client's Transport typically has internal state (cached
-// TCP connections), so Clients should be reused instead of created as
-// needed. Clients are safe for concurrent use by multiple goroutines.
-type Client struct {
- // Transport specifies the mechanism by which individual
- // HTTP requests are made.
- // If nil, DefaultTransport is used.
- Transport RoundTripper
-
- // CheckRedirect specifies the policy for handling redirects.
- // If CheckRedirect is not nil, the client calls it before
- // following an HTTP redirect. The arguments req and via are
- // the upcoming request and the requests made already, oldest
- // first. If CheckRedirect returns an error, the Client's Get
- // method returns both the previous Response and
- // CheckRedirect's error (wrapped in a url.Error) instead of
- // issuing the Request req.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive requests.
- CheckRedirect func(req *Request, via []*Request) error
-
- // Jar specifies the cookie jar.
- // If Jar is nil, cookies are not sent in requests and ignored
- // in responses.
- Jar CookieJar
-}
-
-// DefaultClient is the default Client and is used by Get, Head, and Post.
-var DefaultClient = &Client{}
-
-// RoundTripper is an interface representing the ability to execute a
-// single HTTP transaction, obtaining the Response for a given Request.
-//
-// A RoundTripper must be safe for concurrent use by multiple
-// goroutines.
-type RoundTripper interface {
- // RoundTrip executes a single HTTP transaction, returning
- // the Response for the request req. RoundTrip should not
- // attempt to interpret the response. In particular,
- // RoundTrip must return err == nil if it obtained a response,
- // regardless of the response's HTTP status code. A non-nil
- // err should be reserved for failure to obtain a response.
- // Similarly, RoundTrip should not attempt to handle
- // higher-level protocol details such as redirects,
- // authentication, or cookies.
- //
- // RoundTrip should not modify the request, except for
- // consuming the Body. The request's URL and Header fields
- // are guaranteed to be initialized.
- RoundTrip(*Request) (*Response, error)
-}
-
-// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
-// return true if the string includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-// Used in Send to implement io.ReadCloser by bundling together the
-// bufio.Reader through which we read the response, and the underlying
-// network connection.
-type readClose struct {
- io.Reader
- io.Closer
-}
-
-func (c *Client) send(req *Request) (*Response, error) {
- if c.Jar != nil {
- for _, cookie := range c.Jar.Cookies(req.URL) {
- req.AddCookie(cookie)
- }
- }
- resp, err := send(req, c.Transport)
- if err != nil {
- return nil, err
- }
- if c.Jar != nil {
- if rc := resp.Cookies(); len(rc) > 0 {
- c.Jar.SetCookies(req.URL, rc)
- }
- }
- return resp, err
-}
-
-// Do sends an HTTP request and returns an HTTP response, following
-// policy (e.g. redirects, cookies, auth) as configured on the client.
-//
-// An error is returned if caused by client policy (such as
-// CheckRedirect), or if there was an HTTP protocol error.
-// A non-2xx response doesn't cause an error.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-//
-// Callers should close resp.Body when done reading from it. If
-// resp.Body is not closed, the Client's underlying RoundTripper
-// (typically Transport) may not be able to re-use a persistent TCP
-// connection to the server for a subsequent "keep-alive" request.
-//
-// Generally Get, Post, or PostForm will be used instead of Do.
-func (c *Client) Do(req *Request) (resp *Response, err error) {
- if req.Method == "GET" || req.Method == "HEAD" {
- return c.doFollowingRedirects(req, shouldRedirectGet)
- }
- if req.Method == "POST" || req.Method == "PUT" {
- return c.doFollowingRedirects(req, shouldRedirectPost)
- }
- return c.send(req)
-}
-
-// send issues an HTTP request.
-// Caller should close resp.Body when done reading from it.
-func send(req *Request, t RoundTripper) (resp *Response, err error) {
- if t == nil {
- t = DefaultTransport
- if t == nil {
- err = errors.New("http: no Client.Transport or DefaultTransport")
- return
- }
- }
-
- if req.URL == nil {
- return nil, errors.New("http: nil Request.URL")
- }
-
- if req.RequestURI != "" {
- return nil, errors.New("http: Request.RequestURI can't be set in client requests.")
- }
-
- // Most of the callers of send (Get, Post, et al) don't need
- // Headers, leaving it uninitialized. We guarantee to the
- // Transport that this has been initialized, though.
- if req.Header == nil {
- req.Header = make(Header)
- }
-
- if u := req.URL.User; u != nil {
- req.Header.Set("Authorization", "Basic "+base64.URLEncoding.EncodeToString([]byte(u.String())))
- }
- resp, err = t.RoundTrip(req)
- if err != nil {
- if resp != nil {
- log.Printf("RoundTripper returned a response & error; ignoring response")
- }
- return nil, err
- }
- return resp, nil
-}
-
-// True if the specified HTTP status code is one for which the Get utility should
-// automatically redirect.
-func shouldRedirectGet(statusCode int) bool {
- switch statusCode {
- case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect:
- return true
- }
- return false
-}
-
-// True if the specified HTTP status code is one for which the Post utility should
-// automatically redirect.
-func shouldRedirectPost(statusCode int) bool {
- switch statusCode {
- case StatusFound, StatusSeeOther:
- return true
- }
- return false
-}
-
-// Get issues a GET to the specified URL. If the response is one of the following
-// redirect codes, Get follows the redirect, up to a maximum of 10 redirects:
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-//
-// An error is returned if there were too many redirects or if there
-// was an HTTP protocol error. A non-2xx response doesn't cause an
-// error.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-//
-// Get is a wrapper around DefaultClient.Get.
-func Get(url string) (resp *Response, err error) {
- return DefaultClient.Get(url)
-}
-
-// Get issues a GET to the specified URL. If the response is one of the
-// following redirect codes, Get follows the redirect after calling the
-// Client's CheckRedirect function.
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-//
-// An error is returned if the Client's CheckRedirect function fails
-// or if there was an HTTP protocol error. A non-2xx response doesn't
-// cause an error.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-func (c *Client) Get(url string) (resp *Response, err error) {
- req, err := NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return c.doFollowingRedirects(req, shouldRedirectGet)
-}
-
-func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) {
- var base *url.URL
- redirectChecker := c.CheckRedirect
- if redirectChecker == nil {
- redirectChecker = defaultCheckRedirect
- }
- var via []*Request
-
- if ireq.URL == nil {
- return nil, errors.New("http: nil Request.URL")
- }
-
- req := ireq
- urlStr := "" // next relative or absolute URL to fetch (after first request)
- redirectFailed := false
- for redirect := 0; ; redirect++ {
- if redirect != 0 {
- req = new(Request)
- req.Method = ireq.Method
- if ireq.Method == "POST" || ireq.Method == "PUT" {
- req.Method = "GET"
- }
- req.Header = make(Header)
- req.URL, err = base.Parse(urlStr)
- if err != nil {
- break
- }
- if len(via) > 0 {
- // Add the Referer header.
- lastReq := via[len(via)-1]
- if lastReq.URL.Scheme != "https" {
- req.Header.Set("Referer", lastReq.URL.String())
- }
-
- err = redirectChecker(req, via)
- if err != nil {
- redirectFailed = true
- break
- }
- }
- }
-
- urlStr = req.URL.String()
- if resp, err = c.send(req); err != nil {
- break
- }
-
- if shouldRedirect(resp.StatusCode) {
- resp.Body.Close()
- if urlStr = resp.Header.Get("Location"); urlStr == "" {
- err = errors.New(fmt.Sprintf("%d response missing Location header", resp.StatusCode))
- break
- }
- base = req.URL
- via = append(via, req)
- continue
- }
- return
- }
-
- method := ireq.Method
- urlErr := &url.Error{
- Op: method[0:1] + strings.ToLower(method[1:]),
- URL: urlStr,
- Err: err,
- }
-
- if redirectFailed {
- // Special case for Go 1 compatibility: return both the response
- // and an error if the CheckRedirect function failed.
- // See http://golang.org/issue/3795
- return resp, urlErr
- }
-
- if resp != nil {
- resp.Body.Close()
- }
- return nil, urlErr
-}
-
-func defaultCheckRedirect(req *Request, via []*Request) error {
- if len(via) >= 10 {
- return errors.New("stopped after 10 redirects")
- }
- return nil
-}
-
-// Post issues a POST to the specified URL.
-//
-// Caller should close resp.Body when done reading from it.
-//
-// Post is a wrapper around DefaultClient.Post
-func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
- return DefaultClient.Post(url, bodyType, body)
-}
-
-// Post issues a POST to the specified URL.
-//
-// Caller should close resp.Body when done reading from it.
-func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
- req, err := NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return c.doFollowingRedirects(req, shouldRedirectPost)
-}
-
-// PostForm issues a POST to the specified URL, with data's keys and
-// values URL-encoded as the request body.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-//
-// PostForm is a wrapper around DefaultClient.PostForm.
-func PostForm(url string, data url.Values) (resp *Response, err error) {
- return DefaultClient.PostForm(url, data)
-}
-
-// PostForm issues a POST to the specified URL,
-// with data's keys and values urlencoded as the request body.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
- return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
-
-// Head issues a HEAD to the specified URL. If the response is one of the
-// following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function.
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-//
-// Head is a wrapper around DefaultClient.Head.
-func Head(url string) (resp *Response, err error) {
- return DefaultClient.Head(url)
-}
-
-// Head issues a HEAD to the specified URL. If the response is one of the
-// following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function.
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-func (c *Client) Head(url string) (resp *Response, err error) {
- req, err := NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return c.doFollowingRedirects(req, shouldRedirectGet)
-}
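The Client.Get and doFollowingRedirects code above is driven through the Client's CheckRedirect hook. A minimal sketch of wiring that hook up, assuming only the standard net/http API; the URL is a placeholder and the 10-hop limit simply mirrors defaultCheckRedirect rather than adding new policy:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

func main() {
	// Mirrors defaultCheckRedirect: stop after ten hops.
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("stopped after 10 redirects")
			}
			return nil
		},
	}
	resp, err := client.Get("http://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	// Per the doc comments above, the caller closes resp.Body when done.
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}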
diff --git a/gcc-4.8.1/libgo/go/net/http/client_test.go b/gcc-4.8.1/libgo/go/net/http/client_test.go
deleted file mode 100644
index 9514a4b96..000000000
--- a/gcc-4.8.1/libgo/go/net/http/client_test.go
+++ /dev/null
@@ -1,678 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for client.go
-
-package http_test
-
-import (
- "bytes"
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- . "net/http"
- "net/http/httptest"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "testing"
-)
-
-var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Last-Modified", "sometime")
- fmt.Fprintf(w, "User-agent: go\nDisallow: /something/")
-})
-
-// pedanticReadAll works like ioutil.ReadAll but additionally
-// verifies that r obeys the documented io.Reader contract.
-func pedanticReadAll(r io.Reader) (b []byte, err error) {
- var bufa [64]byte
- buf := bufa[:]
- for {
- n, err := r.Read(buf)
- if n == 0 && err == nil {
- return nil, fmt.Errorf("Read: n=0 with err=nil")
- }
- b = append(b, buf[:n]...)
- if err == io.EOF {
- n, err := r.Read(buf)
- if n != 0 || err != io.EOF {
- return nil, fmt.Errorf("Read: n=%d err=%#v after EOF", n, err)
- }
- return b, nil
- }
- if err != nil {
- return b, err
- }
- }
- panic("unreachable")
-}
-
-func TestClient(t *testing.T) {
- ts := httptest.NewServer(robotsTxtHandler)
- defer ts.Close()
-
- r, err := Get(ts.URL)
- var b []byte
- if err == nil {
- b, err = pedanticReadAll(r.Body)
- r.Body.Close()
- }
- if err != nil {
- t.Error(err)
- } else if s := string(b); !strings.HasPrefix(s, "User-agent:") {
- t.Errorf("Incorrect page body (did not begin with User-agent): %q", s)
- }
-}
-
-func TestClientHead(t *testing.T) {
- ts := httptest.NewServer(robotsTxtHandler)
- defer ts.Close()
-
- r, err := Head(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if _, ok := r.Header["Last-Modified"]; !ok {
- t.Error("Last-Modified header not found.")
- }
-}
-
-type recordingTransport struct {
- req *Request
-}
-
-func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err error) {
- t.req = req
- return nil, errors.New("dummy impl")
-}
-
-func TestGetRequestFormat(t *testing.T) {
- tr := &recordingTransport{}
- client := &Client{Transport: tr}
- url := "http://dummy.faketld/"
- client.Get(url) // Note: doesn't hit network
- if tr.req.Method != "GET" {
- t.Errorf("expected method %q; got %q", "GET", tr.req.Method)
- }
- if tr.req.URL.String() != url {
- t.Errorf("expected URL %q; got %q", url, tr.req.URL.String())
- }
- if tr.req.Header == nil {
- t.Errorf("expected non-nil request Header")
- }
-}
-
-func TestPostRequestFormat(t *testing.T) {
- tr := &recordingTransport{}
- client := &Client{Transport: tr}
-
- url := "http://dummy.faketld/"
- json := `{"key":"value"}`
- b := strings.NewReader(json)
- client.Post(url, "application/json", b) // Note: doesn't hit network
-
- if tr.req.Method != "POST" {
- t.Errorf("got method %q, want %q", tr.req.Method, "POST")
- }
- if tr.req.URL.String() != url {
- t.Errorf("got URL %q, want %q", tr.req.URL.String(), url)
- }
- if tr.req.Header == nil {
- t.Fatalf("expected non-nil request Header")
- }
- if tr.req.Close {
- t.Error("got Close true, want false")
- }
- if g, e := tr.req.ContentLength, int64(len(json)); g != e {
- t.Errorf("got ContentLength %d, want %d", g, e)
- }
-}
-
-func TestPostFormRequestFormat(t *testing.T) {
- tr := &recordingTransport{}
- client := &Client{Transport: tr}
-
- urlStr := "http://dummy.faketld/"
- form := make(url.Values)
- form.Set("foo", "bar")
- form.Add("foo", "bar2")
- form.Set("bar", "baz")
- client.PostForm(urlStr, form) // Note: doesn't hit network
-
- if tr.req.Method != "POST" {
- t.Errorf("got method %q, want %q", tr.req.Method, "POST")
- }
- if tr.req.URL.String() != urlStr {
- t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr)
- }
- if tr.req.Header == nil {
- t.Fatalf("expected non-nil request Header")
- }
- if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e {
- t.Errorf("got Content-Type %q, want %q", g, e)
- }
- if tr.req.Close {
- t.Error("got Close true, want false")
- }
- // Depending on map iteration, body can be either of these.
- expectedBody := "foo=bar&foo=bar2&bar=baz"
- expectedBody1 := "bar=baz&foo=bar&foo=bar2"
- if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e {
- t.Errorf("got ContentLength %d, want %d", g, e)
- }
- bodyb, err := ioutil.ReadAll(tr.req.Body)
- if err != nil {
- t.Fatalf("ReadAll on req.Body: %v", err)
- }
- if g := string(bodyb); g != expectedBody && g != expectedBody1 {
- t.Errorf("got body %q, want %q or %q", g, expectedBody, expectedBody1)
- }
-}
-
-func TestRedirects(t *testing.T) {
- var ts *httptest.Server
- ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- n, _ := strconv.Atoi(r.FormValue("n"))
- // Test Referer header. (7 is arbitrary position to test at)
- if n == 7 {
- if g, e := r.Referer(), ts.URL+"/?n=6"; e != g {
- t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g)
- }
- }
- if n < 15 {
- Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound)
- return
- }
- fmt.Fprintf(w, "n=%d", n)
- }))
- defer ts.Close()
-
- c := &Client{}
- _, err := c.Get(ts.URL)
- if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with default client Get, expected error %q, got %q", e, g)
- }
-
- // HEAD request should also have the ability to follow redirects.
- _, err = c.Head(ts.URL)
- if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with default client Head, expected error %q, got %q", e, g)
- }
-
- // Do should also follow redirects.
- greq, _ := NewRequest("GET", ts.URL, nil)
- _, err = c.Do(greq)
- if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with default client Do, expected error %q, got %q", e, g)
- }
-
- var checkErr error
- var lastVia []*Request
- c = &Client{CheckRedirect: func(_ *Request, via []*Request) error {
- lastVia = via
- return checkErr
- }}
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatalf("Get error: %v", err)
- }
- finalUrl := res.Request.URL.String()
- if e, g := "<nil>", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with custom client, expected error %q, got %q", e, g)
- }
- if !strings.HasSuffix(finalUrl, "/?n=15") {
- t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl)
- }
- if e, g := 15, len(lastVia); e != g {
- t.Errorf("expected lastVia to have contained %d elements; got %d", e, g)
- }
-
- checkErr = errors.New("no redirects allowed")
- res, err = c.Get(ts.URL)
- if urlError, ok := err.(*url.Error); !ok || urlError.Err != checkErr {
- t.Errorf("with redirects forbidden, expected a *url.Error with our 'no redirects allowed' error inside; got %#v (%q)", err, err)
- }
- if res == nil {
- t.Fatalf("Expected a non-nil Response on CheckRedirect failure (http://golang.org/issue/3795)")
- }
- if res.Header.Get("Location") == "" {
- t.Errorf("no Location header in Response")
- }
-}
-
-func TestPostRedirects(t *testing.T) {
- var log struct {
- sync.Mutex
- bytes.Buffer
- }
- var ts *httptest.Server
- ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- log.Lock()
- fmt.Fprintf(&log.Buffer, "%s %s ", r.Method, r.RequestURI)
- log.Unlock()
- if v := r.URL.Query().Get("code"); v != "" {
- code, _ := strconv.Atoi(v)
- if code/100 == 3 {
- w.Header().Set("Location", ts.URL)
- }
- w.WriteHeader(code)
- }
- }))
- tests := []struct {
- suffix string
- want int // response code
- }{
- {"/", 200},
- {"/?code=301", 301},
- {"/?code=302", 200},
- {"/?code=303", 200},
- {"/?code=404", 404},
- }
- for _, tt := range tests {
- res, err := Post(ts.URL+tt.suffix, "text/plain", strings.NewReader("Some content"))
- if err != nil {
- t.Fatal(err)
- }
- if res.StatusCode != tt.want {
- t.Errorf("POST %s: status code = %d; want %d", tt.suffix, res.StatusCode, tt.want)
- }
- }
- log.Lock()
- got := log.String()
- log.Unlock()
- want := "POST / POST /?code=301 POST /?code=302 GET / POST /?code=303 GET / POST /?code=404 "
- if got != want {
- t.Errorf("Log differs.\n Got: %q\nWant: %q", got, want)
- }
-}
-
-var expectedCookies = []*Cookie{
- {Name: "ChocolateChip", Value: "tasty"},
- {Name: "First", Value: "Hit"},
- {Name: "Second", Value: "Hit"},
-}
-
-var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
- for _, cookie := range r.Cookies() {
- SetCookie(w, cookie)
- }
- if r.URL.Path == "/" {
- SetCookie(w, expectedCookies[1])
- Redirect(w, r, "/second", StatusMovedPermanently)
- } else {
- SetCookie(w, expectedCookies[2])
- w.Write([]byte("hello"))
- }
-})
-
-func TestClientSendsCookieFromJar(t *testing.T) {
- tr := &recordingTransport{}
- client := &Client{Transport: tr}
- client.Jar = &TestJar{perURL: make(map[string][]*Cookie)}
- us := "http://dummy.faketld/"
- u, _ := url.Parse(us)
- client.Jar.SetCookies(u, expectedCookies)
-
- client.Get(us) // Note: doesn't hit network
- matchReturnedCookies(t, expectedCookies, tr.req.Cookies())
-
- client.Head(us) // Note: doesn't hit network
- matchReturnedCookies(t, expectedCookies, tr.req.Cookies())
-
- client.Post(us, "text/plain", strings.NewReader("body")) // Note: doesn't hit network
- matchReturnedCookies(t, expectedCookies, tr.req.Cookies())
-
- client.PostForm(us, url.Values{}) // Note: doesn't hit network
- matchReturnedCookies(t, expectedCookies, tr.req.Cookies())
-
- req, _ := NewRequest("GET", us, nil)
- client.Do(req) // Note: doesn't hit network
- matchReturnedCookies(t, expectedCookies, tr.req.Cookies())
-
- req, _ = NewRequest("POST", us, nil)
- client.Do(req) // Note: doesn't hit network
- matchReturnedCookies(t, expectedCookies, tr.req.Cookies())
-}
-
-// Just enough correctness for our redirect tests. Uses the URL.Host as the
-// scope of all cookies.
-type TestJar struct {
- m sync.Mutex
- perURL map[string][]*Cookie
-}
-
-func (j *TestJar) SetCookies(u *url.URL, cookies []*Cookie) {
- j.m.Lock()
- defer j.m.Unlock()
- if j.perURL == nil {
- j.perURL = make(map[string][]*Cookie)
- }
- j.perURL[u.Host] = cookies
-}
-
-func (j *TestJar) Cookies(u *url.URL) []*Cookie {
- j.m.Lock()
- defer j.m.Unlock()
- return j.perURL[u.Host]
-}
-
-func TestRedirectCookiesOnRequest(t *testing.T) {
- var ts *httptest.Server
- ts = httptest.NewServer(echoCookiesRedirectHandler)
- defer ts.Close()
- c := &Client{}
- req, _ := NewRequest("GET", ts.URL, nil)
- req.AddCookie(expectedCookies[0])
- // TODO: Uncomment when an implementation of an RFC 6265 cookie jar lands.
- _ = c
- // resp, _ := c.Do(req)
- // matchReturnedCookies(t, expectedCookies, resp.Cookies())
-
- req, _ = NewRequest("GET", ts.URL, nil)
- // resp, _ = c.Do(req)
- // matchReturnedCookies(t, expectedCookies[1:], resp.Cookies())
-}
-
-func TestRedirectCookiesJar(t *testing.T) {
- var ts *httptest.Server
- ts = httptest.NewServer(echoCookiesRedirectHandler)
- defer ts.Close()
- c := &Client{
- Jar: new(TestJar),
- }
- u, _ := url.Parse(ts.URL)
- c.Jar.SetCookies(u, []*Cookie{expectedCookies[0]})
- resp, err := c.Get(ts.URL)
- if err != nil {
- t.Fatalf("Get: %v", err)
- }
- matchReturnedCookies(t, expectedCookies, resp.Cookies())
-}
-
-func matchReturnedCookies(t *testing.T, expected, given []*Cookie) {
- t.Logf("Received cookies: %v", given)
- if len(given) != len(expected) {
- t.Errorf("Expected %d cookies, got %d", len(expected), len(given))
- }
- for _, ec := range expected {
- foundC := false
- for _, c := range given {
- if ec.Name == c.Name && ec.Value == c.Value {
- foundC = true
- break
- }
- }
- if !foundC {
- t.Errorf("Missing cookie %v", ec)
- }
- }
-}
-
-func TestJarCalls(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- pathSuffix := r.RequestURI[1:]
- if r.RequestURI == "/nosetcookie" {
- return // don't set cookies for this path
- }
- SetCookie(w, &Cookie{Name: "name" + pathSuffix, Value: "val" + pathSuffix})
- if r.RequestURI == "/" {
- Redirect(w, r, "http://secondhost.fake/secondpath", 302)
- }
- }))
- defer ts.Close()
- jar := new(RecordingJar)
- c := &Client{
- Jar: jar,
- Transport: &Transport{
- Dial: func(_ string, _ string) (net.Conn, error) {
- return net.Dial("tcp", ts.Listener.Addr().String())
- },
- },
- }
- _, err := c.Get("http://firsthost.fake/")
- if err != nil {
- t.Fatal(err)
- }
- _, err = c.Get("http://firsthost.fake/nosetcookie")
- if err != nil {
- t.Fatal(err)
- }
- got := jar.log.String()
- want := `Cookies("http://firsthost.fake/")
-SetCookie("http://firsthost.fake/", [name=val])
-Cookies("http://secondhost.fake/secondpath")
-SetCookie("http://secondhost.fake/secondpath", [namesecondpath=valsecondpath])
-Cookies("http://firsthost.fake/nosetcookie")
-`
- if got != want {
- t.Errorf("Got Jar calls:\n%s\nWant:\n%s", got, want)
- }
-}
-
-// RecordingJar keeps a log of calls made to it, without
-// tracking any cookies.
-type RecordingJar struct {
- mu sync.Mutex
- log bytes.Buffer
-}
-
-func (j *RecordingJar) SetCookies(u *url.URL, cookies []*Cookie) {
- j.logf("SetCookie(%q, %v)\n", u, cookies)
-}
-
-func (j *RecordingJar) Cookies(u *url.URL) []*Cookie {
- j.logf("Cookies(%q)\n", u)
- return nil
-}
-
-func (j *RecordingJar) logf(format string, args ...interface{}) {
- j.mu.Lock()
- defer j.mu.Unlock()
- fmt.Fprintf(&j.log, format, args...)
-}
-
-func TestStreamingGet(t *testing.T) {
- say := make(chan string)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.(Flusher).Flush()
- for str := range say {
- w.Write([]byte(str))
- w.(Flusher).Flush()
- }
- }))
- defer ts.Close()
-
- c := &Client{}
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- var buf [10]byte
- for _, str := range []string{"i", "am", "also", "known", "as", "comet"} {
- say <- str
- n, err := io.ReadFull(res.Body, buf[0:len(str)])
- if err != nil {
- t.Fatalf("ReadFull on %q: %v", str, err)
- }
- if n != len(str) {
- t.Fatalf("Receiving %q, only read %d bytes", str, n)
- }
- got := string(buf[0:n])
- if got != str {
- t.Fatalf("Expected %q, got %q", str, got)
- }
- }
- close(say)
- _, err = io.ReadFull(res.Body, buf[0:1])
- if err != io.EOF {
- t.Fatalf("at end expected EOF, got %v", err)
- }
-}
-
-type writeCountingConn struct {
- net.Conn
- count *int
-}
-
-func (c *writeCountingConn) Write(p []byte) (int, error) {
- *c.count++
- return c.Conn.Write(p)
-}
-
-// TestClientWrites verifies that client requests are buffered and we
-// don't send a TCP packet per line of the http request + body.
-func TestClientWrites(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- }))
- defer ts.Close()
-
- writes := 0
- dialer := func(netz string, addr string) (net.Conn, error) {
- c, err := net.Dial(netz, addr)
- if err == nil {
- c = &writeCountingConn{c, &writes}
- }
- return c, err
- }
- c := &Client{Transport: &Transport{Dial: dialer}}
-
- _, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if writes != 1 {
- t.Errorf("Get request did %d Write calls, want 1", writes)
- }
-
- writes = 0
- _, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}})
- if err != nil {
- t.Fatal(err)
- }
- if writes != 1 {
- t.Errorf("Post request did %d Write calls, want 1", writes)
- }
-}
-
-func TestClientInsecureTransport(t *testing.T) {
- ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Write([]byte("Hello"))
- }))
- defer ts.Close()
-
- // TODO(bradfitz): add tests for skipping hostname checks too?
- // would require a new cert for testing, and probably
- // redundant with these tests.
- for _, insecure := range []bool{true, false} {
- tr := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: insecure,
- },
- }
- c := &Client{Transport: tr}
- _, err := c.Get(ts.URL)
- if (err == nil) != insecure {
- t.Errorf("insecure=%v: got unexpected err=%v", insecure, err)
- }
- }
-}
-
-func TestClientErrorWithRequestURI(t *testing.T) {
- req, _ := NewRequest("GET", "http://localhost:1234/", nil)
- req.RequestURI = "/this/field/is/illegal/and/should/error/"
- _, err := DefaultClient.Do(req)
- if err == nil {
- t.Fatalf("expected an error")
- }
- if !strings.Contains(err.Error(), "RequestURI") {
- t.Errorf("wanted error mentioning RequestURI; got error: %v", err)
- }
-}
-
-func newTLSTransport(t *testing.T, ts *httptest.Server) *Transport {
- certs := x509.NewCertPool()
- for _, c := range ts.TLS.Certificates {
- roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1])
- if err != nil {
- t.Fatalf("error parsing server's root cert: %v", err)
- }
- for _, root := range roots {
- certs.AddCert(root)
- }
- }
- return &Transport{
- TLSClientConfig: &tls.Config{RootCAs: certs},
- }
-}
-
-func TestClientWithCorrectTLSServerName(t *testing.T) {
- ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.TLS.ServerName != "127.0.0.1" {
- t.Errorf("expected client to set ServerName 127.0.0.1, got: %q", r.TLS.ServerName)
- }
- }))
- defer ts.Close()
-
- c := &Client{Transport: newTLSTransport(t, ts)}
- if _, err := c.Get(ts.URL); err != nil {
- t.Fatalf("expected successful TLS connection, got error: %v", err)
- }
-}
-
-func TestClientWithIncorrectTLSServerName(t *testing.T) {
- ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
- defer ts.Close()
-
- trans := newTLSTransport(t, ts)
- trans.TLSClientConfig.ServerName = "badserver"
- c := &Client{Transport: trans}
- _, err := c.Get(ts.URL)
- if err == nil {
- t.Fatalf("expected an error")
- }
- if !strings.Contains(err.Error(), "127.0.0.1") || !strings.Contains(err.Error(), "badserver") {
- t.Errorf("wanted error mentioning 127.0.0.1 and badserver; got error: %v", err)
- }
-}
-
-// Verify Response.ContentLength is populated. http://golang.org/issue/4126
-func TestClientHeadContentLength(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if v := r.FormValue("cl"); v != "" {
- w.Header().Set("Content-Length", v)
- }
- }))
- defer ts.Close()
- tests := []struct {
- suffix string
- want int64
- }{
- {"/?cl=1234", 1234},
- {"/?cl=0", 0},
- {"", -1},
- }
- for _, tt := range tests {
- req, _ := NewRequest("HEAD", ts.URL+tt.suffix, nil)
- res, err := DefaultClient.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- if res.ContentLength != tt.want {
- t.Errorf("Content-Length = %d; want %d", res.ContentLength, tt.want)
- }
- bs, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if len(bs) != 0 {
- t.Errorf("Unexpected content: %q", bs)
- }
- }
-}
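The recordingTransport used throughout these tests is a reusable pattern for inspecting outgoing requests without any network I/O. A sketch of the same idea, under the assumption that returning an error from RoundTrip is enough to keep the Client offline; the host name is a placeholder:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// captureTransport records the last request instead of sending it.
type captureTransport struct {
	req *http.Request
}

func (t *captureTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	t.req = req
	return nil, errors.New("captureTransport: request recorded, not sent")
}

func main() {
	tr := &captureTransport{}
	client := &http.Client{Transport: tr}
	client.Get("http://dummy.faketld/")    // never hits the network; the error is expected
	fmt.Println(tr.req.Method, tr.req.URL) // GET http://dummy.faketld/
}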
diff --git a/gcc-4.8.1/libgo/go/net/http/cookie.go b/gcc-4.8.1/libgo/go/net/http/cookie.go
deleted file mode 100644
index 155b09223..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cookie.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// This implementation follows RFC 6265:
-//
-// http://tools.ietf.org/html/rfc6265
-
-// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
-// HTTP response or the Cookie header of an HTTP request.
-type Cookie struct {
- Name string
- Value string
- Path string
- Domain string
- Expires time.Time
- RawExpires string
-
- // MaxAge=0 means no 'Max-Age' attribute specified.
- // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
- // MaxAge>0 means Max-Age attribute present and given in seconds
- MaxAge int
- Secure bool
- HttpOnly bool
- Raw string
- Unparsed []string // Raw text of unparsed attribute-value pairs
-}
-
-// readSetCookies parses all "Set-Cookie" values from
-// the header h and returns the successfully parsed Cookies.
-func readSetCookies(h Header) []*Cookie {
- cookies := []*Cookie{}
- for _, line := range h["Set-Cookie"] {
- parts := strings.Split(strings.TrimSpace(line), ";")
- if len(parts) == 1 && parts[0] == "" {
- continue
- }
- parts[0] = strings.TrimSpace(parts[0])
- j := strings.Index(parts[0], "=")
- if j < 0 {
- continue
- }
- name, value := parts[0][:j], parts[0][j+1:]
- if !isCookieNameValid(name) {
- continue
- }
- value, success := parseCookieValue(value)
- if !success {
- continue
- }
- c := &Cookie{
- Name: name,
- Value: value,
- Raw: line,
- }
- for i := 1; i < len(parts); i++ {
- parts[i] = strings.TrimSpace(parts[i])
- if len(parts[i]) == 0 {
- continue
- }
-
- attr, val := parts[i], ""
- if j := strings.Index(attr, "="); j >= 0 {
- attr, val = attr[:j], attr[j+1:]
- }
- lowerAttr := strings.ToLower(attr)
- parseCookieValueFn := parseCookieValue
- if lowerAttr == "expires" {
- parseCookieValueFn = parseCookieExpiresValue
- }
- val, success = parseCookieValueFn(val)
- if !success {
- c.Unparsed = append(c.Unparsed, parts[i])
- continue
- }
- switch lowerAttr {
- case "secure":
- c.Secure = true
- continue
- case "httponly":
- c.HttpOnly = true
- continue
- case "domain":
- c.Domain = val
- // TODO: Add domain parsing
- continue
- case "max-age":
- secs, err := strconv.Atoi(val)
- if err != nil || secs != 0 && val[0] == '0' {
- break
- }
- if secs <= 0 {
- c.MaxAge = -1
- } else {
- c.MaxAge = secs
- }
- continue
- case "expires":
- c.RawExpires = val
- exptime, err := time.Parse(time.RFC1123, val)
- if err != nil {
- exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
- if err != nil {
- c.Expires = time.Time{}
- break
- }
- }
- c.Expires = exptime.UTC()
- continue
- case "path":
- c.Path = val
- // TODO: Add path parsing
- continue
- }
- c.Unparsed = append(c.Unparsed, parts[i])
- }
- cookies = append(cookies, c)
- }
- return cookies
-}
-
-// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
-func SetCookie(w ResponseWriter, cookie *Cookie) {
- w.Header().Add("Set-Cookie", cookie.String())
-}
-
-// String returns the serialization of the cookie for use in a Cookie
-// header (if only Name and Value are set) or a Set-Cookie response
-// header (if other fields are set).
-func (c *Cookie) String() string {
- var b bytes.Buffer
- fmt.Fprintf(&b, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
- if len(c.Path) > 0 {
- fmt.Fprintf(&b, "; Path=%s", sanitizeValue(c.Path))
- }
- if len(c.Domain) > 0 {
- fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(c.Domain))
- }
- if c.Expires.Unix() > 0 {
- fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123))
- }
- if c.MaxAge > 0 {
- fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
- } else if c.MaxAge < 0 {
- fmt.Fprintf(&b, "; Max-Age=0")
- }
- if c.HttpOnly {
- fmt.Fprintf(&b, "; HttpOnly")
- }
- if c.Secure {
- fmt.Fprintf(&b, "; Secure")
- }
- return b.String()
-}
-
-// readCookies parses all "Cookie" values from the header h and
-// returns the successfully parsed Cookies.
-//
-// If filter isn't empty, only cookies of that name are returned.
-func readCookies(h Header, filter string) []*Cookie {
- cookies := []*Cookie{}
- lines, ok := h["Cookie"]
- if !ok {
- return cookies
- }
-
- for _, line := range lines {
- parts := strings.Split(strings.TrimSpace(line), ";")
- if len(parts) == 1 && parts[0] == "" {
- continue
- }
- // Per-line attributes
- parsedPairs := 0
- for i := 0; i < len(parts); i++ {
- parts[i] = strings.TrimSpace(parts[i])
- if len(parts[i]) == 0 {
- continue
- }
- name, val := parts[i], ""
- if j := strings.Index(name, "="); j >= 0 {
- name, val = name[:j], name[j+1:]
- }
- if !isCookieNameValid(name) {
- continue
- }
- if filter != "" && filter != name {
- continue
- }
- val, success := parseCookieValue(val)
- if !success {
- continue
- }
- cookies = append(cookies, &Cookie{Name: name, Value: val})
- parsedPairs++
- }
- }
- return cookies
-}
-
-var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
-
-func sanitizeName(n string) string {
- return cookieNameSanitizer.Replace(n)
-}
-
-var cookieValueSanitizer = strings.NewReplacer("\n", " ", "\r", " ", ";", " ")
-
-func sanitizeValue(v string) string {
- return cookieValueSanitizer.Replace(v)
-}
-
-func unquoteCookieValue(v string) string {
- if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' {
- return v[1 : len(v)-1]
- }
- return v
-}
-
-func isCookieByte(c byte) bool {
- switch {
- case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a,
- 0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e:
- return true
- }
- return false
-}
-
-func isCookieExpiresByte(c byte) (ok bool) {
- return isCookieByte(c) || c == ',' || c == ' '
-}
-
-func parseCookieValue(raw string) (string, bool) {
- return parseCookieValueUsing(raw, isCookieByte)
-}
-
-func parseCookieExpiresValue(raw string) (string, bool) {
- return parseCookieValueUsing(raw, isCookieExpiresByte)
-}
-
-func parseCookieValueUsing(raw string, validByte func(byte) bool) (string, bool) {
- raw = unquoteCookieValue(raw)
- for i := 0; i < len(raw); i++ {
- if !validByte(raw[i]) {
- return "", false
- }
- }
- return raw, true
-}
-
-func isCookieNameValid(raw string) bool {
- return strings.IndexFunc(raw, isNotToken) < 0
-}
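The readSetCookies and readCookies parsers above sit behind the exported cookie API. A brief sketch of that surface, using httptest.NewRecorder purely as a convenient ResponseWriter for the demonstration:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Response side: SetCookie serializes the cookie via Cookie.String
	// and appends it as a Set-Cookie header.
	rec := httptest.NewRecorder()
	http.SetCookie(rec, &http.Cookie{Name: "session", Value: "abc123", Path: "/", HttpOnly: true})
	fmt.Println(rec.Header().Get("Set-Cookie")) // session=abc123; Path=/; HttpOnly

	// Request side: AddCookie writes the Cookie header and Cookies()
	// parses it back with the readCookies logic shown above.
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	req.AddCookie(&http.Cookie{Name: "session", Value: "abc123"})
	for _, c := range req.Cookies() {
		fmt.Printf("%s=%s\n", c.Name, c.Value)
	}
}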
diff --git a/gcc-4.8.1/libgo/go/net/http/cookie_test.go b/gcc-4.8.1/libgo/go/net/http/cookie_test.go
deleted file mode 100644
index f84f73936..000000000
--- a/gcc-4.8.1/libgo/go/net/http/cookie_test.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "testing"
- "time"
-)
-
-var writeSetCookiesTests = []struct {
- Cookie *Cookie
- Raw string
-}{
- {
- &Cookie{Name: "cookie-1", Value: "v$1"},
- "cookie-1=v$1",
- },
- {
- &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
- "cookie-2=two; Max-Age=3600",
- },
- {
- &Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"},
- "cookie-3=three; Domain=.example.com",
- },
- {
- &Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"},
- "cookie-4=four; Path=/restricted/",
- },
-}
-
-func TestWriteSetCookies(t *testing.T) {
- for i, tt := range writeSetCookiesTests {
- if g, e := tt.Cookie.String(), tt.Raw; g != e {
- t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g)
- continue
- }
- }
-}
-
-type headerOnlyResponseWriter Header
-
-func (ho headerOnlyResponseWriter) Header() Header {
- return Header(ho)
-}
-
-func (ho headerOnlyResponseWriter) Write([]byte) (int, error) {
- panic("NOIMPL")
-}
-
-func (ho headerOnlyResponseWriter) WriteHeader(int) {
- panic("NOIMPL")
-}
-
-func TestSetCookie(t *testing.T) {
- m := make(Header)
- SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"})
- SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600})
- if l := len(m["Set-Cookie"]); l != 2 {
- t.Fatalf("expected %d cookies, got %d", 2, l)
- }
- if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e {
- t.Errorf("cookie #1: want %q, got %q", e, g)
- }
- if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e {
- t.Errorf("cookie #2: want %q, got %q", e, g)
- }
-}
-
-var addCookieTests = []struct {
- Cookies []*Cookie
- Raw string
-}{
- {
- []*Cookie{},
- "",
- },
- {
- []*Cookie{{Name: "cookie-1", Value: "v$1"}},
- "cookie-1=v$1",
- },
- {
- []*Cookie{
- {Name: "cookie-1", Value: "v$1"},
- {Name: "cookie-2", Value: "v$2"},
- {Name: "cookie-3", Value: "v$3"},
- },
- "cookie-1=v$1; cookie-2=v$2; cookie-3=v$3",
- },
-}
-
-func TestAddCookie(t *testing.T) {
- for i, tt := range addCookieTests {
- req, _ := NewRequest("GET", "http://example.com/", nil)
- for _, c := range tt.Cookies {
- req.AddCookie(c)
- }
- if g := req.Header.Get("Cookie"); g != tt.Raw {
- t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g)
- continue
- }
- }
-}
-
-var readSetCookiesTests = []struct {
- Header Header
- Cookies []*Cookie
-}{
- {
- Header{"Set-Cookie": {"Cookie-1=v$1"}},
- []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}},
- },
- {
- Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}},
- []*Cookie{{
- Name: "NID",
- Value: "99=YsDT5i3E-CXax-",
- Path: "/",
- Domain: ".google.ch",
- HttpOnly: true,
- Expires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC),
- RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT",
- Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly",
- }},
- },
- {
- Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
- []*Cookie{{
- Name: ".ASPXAUTH",
- Value: "7E3AA",
- Path: "/",
- Expires: time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC),
- RawExpires: "Wed, 07-Mar-2012 14:25:06 GMT",
- HttpOnly: true,
- Raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly",
- }},
- },
- {
- Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}},
- []*Cookie{{
- Name: "ASP.NET_SessionId",
- Value: "foo",
- Path: "/",
- HttpOnly: true,
- Raw: "ASP.NET_SessionId=foo; path=/; HttpOnly",
- }},
- },
-
- // TODO(bradfitz): users have reported seeing this in the
- // wild, but do browsers handle it? RFC 6265 just says "don't
- // do that" (section 3) and then never mentions header folding
- // again.
- // Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
-}
-
-func toJSON(v interface{}) string {
- b, err := json.Marshal(v)
- if err != nil {
- return fmt.Sprintf("%#v", v)
- }
- return string(b)
-}
-
-func TestReadSetCookies(t *testing.T) {
- for i, tt := range readSetCookiesTests {
- for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input
- c := readSetCookies(tt.Header)
- if !reflect.DeepEqual(c, tt.Cookies) {
- t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies))
- continue
- }
- }
- }
-}
-
-var readCookiesTests = []struct {
- Header Header
- Filter string
- Cookies []*Cookie
-}{
- {
- Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
- "",
- []*Cookie{
- {Name: "Cookie-1", Value: "v$1"},
- {Name: "c2", Value: "v2"},
- },
- },
- {
- Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
- "c2",
- []*Cookie{
- {Name: "c2", Value: "v2"},
- },
- },
- {
- Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
- "",
- []*Cookie{
- {Name: "Cookie-1", Value: "v$1"},
- {Name: "c2", Value: "v2"},
- },
- },
- {
- Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
- "c2",
- []*Cookie{
- {Name: "c2", Value: "v2"},
- },
- },
-}
-
-func TestReadCookies(t *testing.T) {
- for i, tt := range readCookiesTests {
- for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input
- c := readCookies(tt.Header, tt.Filter)
- if !reflect.DeepEqual(c, tt.Cookies) {
- t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies))
- continue
- }
- }
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/doc.go b/gcc-4.8.1/libgo/go/net/http/doc.go
deleted file mode 100644
index b6ae8b87a..000000000
--- a/gcc-4.8.1/libgo/go/net/http/doc.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package http provides HTTP client and server implementations.
-
-Get, Head, Post, and PostForm make HTTP requests:
-
- resp, err := http.Get("http://example.com/")
- ...
- resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
- ...
- resp, err := http.PostForm("http://example.com/form",
- url.Values{"key": {"Value"}, "id": {"123"}})
-
-The client must close the response body when finished with it:
-
- resp, err := http.Get("http://example.com/")
- if err != nil {
- // handle error
- }
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- // ...
-
-For control over HTTP client headers, redirect policy, and other
-settings, create a Client:
-
- client := &http.Client{
- CheckRedirect: redirectPolicyFunc,
- }
-
- resp, err := client.Get("http://example.com")
- // ...
-
- req, err := http.NewRequest("GET", "http://example.com", nil)
- // ...
- req.Header.Add("If-None-Match", `W/"wyzzy"`)
- resp, err := client.Do(req)
- // ...
-
-For control over proxies, TLS configuration, keep-alives,
-compression, and other settings, create a Transport:
-
- tr := &http.Transport{
- TLSClientConfig: &tls.Config{RootCAs: pool},
- DisableCompression: true,
- }
- client := &http.Client{Transport: tr}
- resp, err := client.Get("https://example.com")
-
-Clients and Transports are safe for concurrent use by multiple
-goroutines and for efficiency should only be created once and re-used.
-
-ListenAndServe starts an HTTP server with a given address and handler.
-The handler is usually nil, which means to use DefaultServeMux.
-Handle and HandleFunc add handlers to DefaultServeMux:
-
- http.Handle("/foo", fooHandler)
-
- http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
- })
-
- log.Fatal(http.ListenAndServe(":8080", nil))
-
-More control over the server's behavior is available by creating a
-custom Server:
-
- s := &http.Server{
- Addr: ":8080",
- Handler: myHandler,
- ReadTimeout: 10 * time.Second,
- WriteTimeout: 10 * time.Second,
- MaxHeaderBytes: 1 << 20,
- }
- log.Fatal(s.ListenAndServe())
-*/
-package http
diff --git a/gcc-4.8.1/libgo/go/net/http/example_test.go b/gcc-4.8.1/libgo/go/net/http/example_test.go
deleted file mode 100644
index 22073eaf7..000000000
--- a/gcc-4.8.1/libgo/go/net/http/example_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
-)
-
-func ExampleHijacker() {
- http.HandleFunc("/hijack", func(w http.ResponseWriter, r *http.Request) {
- hj, ok := w.(http.Hijacker)
- if !ok {
- http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError)
- return
- }
- conn, bufrw, err := hj.Hijack()
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- // Don't forget to close the connection:
- defer conn.Close()
- bufrw.WriteString("Now we're speaking raw TCP. Say hi: ")
- bufrw.Flush()
- s, err := bufrw.ReadString('\n')
- if err != nil {
- log.Printf("error reading string: %v", err)
- return
- }
- fmt.Fprintf(bufrw, "You said: %q\nBye.\n", s)
- bufrw.Flush()
- })
-}
-
-func ExampleGet() {
- res, err := http.Get("http://www.google.com/robots.txt")
- if err != nil {
- log.Fatal(err)
- }
- robots, err := ioutil.ReadAll(res.Body)
- res.Body.Close()
- if err != nil {
- log.Fatal(err)
- }
- fmt.Printf("%s", robots)
-}
-
-func ExampleFileServer() {
- // we use StripPrefix so that /tmpfiles/somefile will access /tmp/somefile
- http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp"))))
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/export_test.go b/gcc-4.8.1/libgo/go/net/http/export_test.go
deleted file mode 100644
index a7a07852d..000000000
--- a/gcc-4.8.1/libgo/go/net/http/export_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Bridge package to expose http internals to tests in the http_test
-// package.
-
-package http
-
-import (
- "net"
- "time"
-)
-
-func NewLoggingConn(baseName string, c net.Conn) net.Conn {
- return newLoggingConn(baseName, c)
-}
-
-func (t *Transport) IdleConnKeysForTesting() (keys []string) {
- keys = make([]string, 0)
- t.idleLk.Lock()
- defer t.idleLk.Unlock()
- if t.idleConn == nil {
- return
- }
- for key := range t.idleConn {
- keys = append(keys, key)
- }
- return
-}
-
-func (t *Transport) IdleConnCountForTesting(cacheKey string) int {
- t.idleLk.Lock()
- defer t.idleLk.Unlock()
- if t.idleConn == nil {
- return 0
- }
- conns, ok := t.idleConn[cacheKey]
- if !ok {
- return 0
- }
- return len(conns)
-}
-
-func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler {
- f := func() <-chan time.Time {
- return ch
- }
- return &timeoutHandler{handler, f, ""}
-}
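NewTestTimeoutHandler exposes the package's timeoutHandler with a test-controlled clock; in normal use the same handler is built through the exported http.TimeoutHandler. A minimal sketch, with the sleep, deadline, address, and message chosen only for illustration:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(2 * time.Second) // deliberately slower than the deadline below
		fmt.Fprintln(w, "done")
	})
	// Requests that outlive the one-second deadline receive
	// 503 Service Unavailable with the given message as the body.
	http.Handle("/slow", http.TimeoutHandler(slow, 1*time.Second, "request timed out"))
	log.Fatal(http.ListenAndServe(":8080", nil))
}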
diff --git a/gcc-4.8.1/libgo/go/net/http/fcgi/child.go b/gcc-4.8.1/libgo/go/net/http/fcgi/child.go
deleted file mode 100644
index c8b9a33c8..000000000
--- a/gcc-4.8.1/libgo/go/net/http/fcgi/child.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fcgi
-
-// This file implements FastCGI from the perspective of a child process.
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/http/cgi"
- "os"
- "time"
-)
-
-// request holds the state for an in-progress request. As soon as it's complete,
-// it's converted to an http.Request.
-type request struct {
- pw *io.PipeWriter
- reqId uint16
- params map[string]string
- buf [1024]byte
- rawParams []byte
- keepConn bool
-}
-
-func newRequest(reqId uint16, flags uint8) *request {
- r := &request{
- reqId: reqId,
- params: map[string]string{},
- keepConn: flags&flagKeepConn != 0,
- }
- r.rawParams = r.buf[:0]
- return r
-}
-
-// parseParams reads an encoded []byte into Params.
-func (r *request) parseParams() {
- text := r.rawParams
- r.rawParams = nil
- for len(text) > 0 {
- keyLen, n := readSize(text)
- if n == 0 {
- return
- }
- text = text[n:]
- valLen, n := readSize(text)
- if n == 0 {
- return
- }
- text = text[n:]
- key := readString(text, keyLen)
- text = text[keyLen:]
- val := readString(text, valLen)
- text = text[valLen:]
- r.params[key] = val
- }
-}
-
-// response implements http.ResponseWriter.
-type response struct {
- req *request
- header http.Header
- w *bufWriter
- wroteHeader bool
-}
-
-func newResponse(c *child, req *request) *response {
- return &response{
- req: req,
- header: http.Header{},
- w: newWriter(c.conn, typeStdout, req.reqId),
- }
-}
-
-func (r *response) Header() http.Header {
- return r.header
-}
-
-func (r *response) Write(data []byte) (int, error) {
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- return r.w.Write(data)
-}
-
-func (r *response) WriteHeader(code int) {
- if r.wroteHeader {
- return
- }
- r.wroteHeader = true
- if code == http.StatusNotModified {
- // Must not have body.
- r.header.Del("Content-Type")
- r.header.Del("Content-Length")
- r.header.Del("Transfer-Encoding")
- } else if r.header.Get("Content-Type") == "" {
- r.header.Set("Content-Type", "text/html; charset=utf-8")
- }
-
- if r.header.Get("Date") == "" {
- r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
- }
-
- fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
- r.header.Write(r.w)
- r.w.WriteString("\r\n")
-}
-
-func (r *response) Flush() {
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- r.w.Flush()
-}
-
-func (r *response) Close() error {
- r.Flush()
- return r.w.Close()
-}
-
-type child struct {
- conn *conn
- handler http.Handler
- requests map[uint16]*request // keyed by request ID
-}
-
-func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child {
- return &child{
- conn: newConn(rwc),
- handler: handler,
- requests: make(map[uint16]*request),
- }
-}
-
-func (c *child) serve() {
- defer c.conn.Close()
- var rec record
- for {
- if err := rec.read(c.conn.rwc); err != nil {
- return
- }
- if err := c.handleRecord(&rec); err != nil {
- return
- }
- }
-}
-
-var errCloseConn = errors.New("fcgi: connection should be closed")
-
-func (c *child) handleRecord(rec *record) error {
- req, ok := c.requests[rec.h.Id]
- if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
- // The spec says to ignore unknown request IDs.
- return nil
- }
- if ok && rec.h.Type == typeBeginRequest {
- // The server is trying to begin a request with the same ID
- // as an in-progress request. This is an error.
- return errors.New("fcgi: received ID that is already in-flight")
- }
-
- switch rec.h.Type {
- case typeBeginRequest:
- var br beginRequest
- if err := br.read(rec.content()); err != nil {
- return err
- }
- if br.role != roleResponder {
- c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
- return nil
- }
- c.requests[rec.h.Id] = newRequest(rec.h.Id, br.flags)
- case typeParams:
- // NOTE(eds): Technically a key-value pair can straddle the boundary
- // between two packets. We buffer until we've received all parameters.
- if len(rec.content()) > 0 {
- req.rawParams = append(req.rawParams, rec.content()...)
- return nil
- }
- req.parseParams()
- case typeStdin:
- content := rec.content()
- if req.pw == nil {
- var body io.ReadCloser
- if len(content) > 0 {
- // body could be an io.LimitReader, but it shouldn't matter
- // as long as both sides are behaving.
- body, req.pw = io.Pipe()
- }
- go c.serveRequest(req, body)
- }
- if len(content) > 0 {
- // TODO(eds): This blocks until the handler reads from the pipe.
- // If the handler takes a long time, it might be a problem.
- req.pw.Write(content)
- } else if req.pw != nil {
- req.pw.Close()
- }
- case typeGetValues:
- values := map[string]string{"FCGI_MPXS_CONNS": "1"}
- c.conn.writePairs(typeGetValuesResult, 0, values)
- case typeData:
- // If the filter role is implemented, read the data stream here.
- case typeAbortRequest:
- delete(c.requests, rec.h.Id)
- c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
- if !req.keepConn {
- // connection will close upon return
- return errCloseConn
- }
- default:
- b := make([]byte, 8)
- b[0] = byte(rec.h.Type)
- c.conn.writeRecord(typeUnknownType, 0, b)
- }
- return nil
-}
-
-func (c *child) serveRequest(req *request, body io.ReadCloser) {
- r := newResponse(c, req)
- httpReq, err := cgi.RequestFromMap(req.params)
- if err != nil {
- // there was an error reading the request
- r.WriteHeader(http.StatusInternalServerError)
- c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
- } else {
- httpReq.Body = body
- c.handler.ServeHTTP(r, httpReq)
- }
- if body != nil {
- body.Close()
- }
- r.Close()
- c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
- if !req.keepConn {
- c.conn.Close()
- }
-}
-
-// Serve accepts incoming FastCGI connections on the listener l, creating a new
-// goroutine for each. The goroutine reads requests and then calls handler
-// to reply to them.
-// If l is nil, Serve accepts connections from os.Stdin.
-// If handler is nil, http.DefaultServeMux is used.
-func Serve(l net.Listener, handler http.Handler) error {
- if l == nil {
- var err error
- l, err = net.FileListener(os.Stdin)
- if err != nil {
- return err
- }
- defer l.Close()
- }
- if handler == nil {
- handler = http.DefaultServeMux
- }
- for {
- rw, err := l.Accept()
- if err != nil {
- return err
- }
- c := newChild(rw, handler)
- go c.serve()
- }
- panic("unreachable")
-}
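Serve's doc comment above covers the two deployment modes (an explicit listener or stdin). A minimal sketch of the listener form; the address and the handler body are placeholders:

package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/http/fcgi"
)

func main() {
	// The front-end web server is configured to connect to this address.
	l, err := net.Listen("tcp", "127.0.0.1:9000") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "served over FastCGI: %s\n", r.URL.Path)
	})
	// Serve accepts connections and, as in child.go above, handles each
	// one on its own goroutine.
	log.Fatal(fcgi.Serve(l, handler))
}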
diff --git a/gcc-4.8.1/libgo/go/net/http/fcgi/fcgi.go b/gcc-4.8.1/libgo/go/net/http/fcgi/fcgi.go
deleted file mode 100644
index 06bba0488..000000000
--- a/gcc-4.8.1/libgo/go/net/http/fcgi/fcgi.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fcgi implements the FastCGI protocol.
-// Currently only the responder role is supported.
-// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
-package fcgi
-
-// This file defines the raw protocol and some utilities used by the child and
-// the host.
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "errors"
- "io"
- "sync"
-)
-
-// recType is a record type, as defined by
-// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8
-type recType uint8
-
-const (
- typeBeginRequest recType = 1
- typeAbortRequest recType = 2
- typeEndRequest recType = 3
- typeParams recType = 4
- typeStdin recType = 5
- typeStdout recType = 6
- typeStderr recType = 7
- typeData recType = 8
- typeGetValues recType = 9
- typeGetValuesResult recType = 10
- typeUnknownType recType = 11
-)
-
-// keep the connection between web-server and responder open after request
-const flagKeepConn = 1
-
-const (
- maxWrite = 65535 // maximum record body
- maxPad = 255
-)
-
-const (
- roleResponder = iota + 1 // only Responders are implemented.
- roleAuthorizer
- roleFilter
-)
-
-const (
- statusRequestComplete = iota
- statusCantMultiplex
- statusOverloaded
- statusUnknownRole
-)
-
-const headerLen = 8
-
-type header struct {
- Version uint8
- Type recType
- Id uint16
- ContentLength uint16
- PaddingLength uint8
- Reserved uint8
-}
-
-type beginRequest struct {
- role uint16
- flags uint8
- reserved [5]uint8
-}
-
-func (br *beginRequest) read(content []byte) error {
- if len(content) != 8 {
- return errors.New("fcgi: invalid begin request record")
- }
- br.role = binary.BigEndian.Uint16(content)
- br.flags = content[2]
- return nil
-}
-
-// for padding so we don't have to allocate all the time
-// not synchronized because we don't care what the contents are
-var pad [maxPad]byte
-
-func (h *header) init(recType recType, reqId uint16, contentLength int) {
- h.Version = 1
- h.Type = recType
- h.Id = reqId
- h.ContentLength = uint16(contentLength)
- h.PaddingLength = uint8(-contentLength & 7)
-}
-
-// conn sends records over rwc
-type conn struct {
- mutex sync.Mutex
- rwc io.ReadWriteCloser
-
- // to avoid allocations
- buf bytes.Buffer
- h header
-}
-
-func newConn(rwc io.ReadWriteCloser) *conn {
- return &conn{rwc: rwc}
-}
-
-func (c *conn) Close() error {
- c.mutex.Lock()
- defer c.mutex.Unlock()
- return c.rwc.Close()
-}
-
-type record struct {
- h header
- buf [maxWrite + maxPad]byte
-}
-
-func (rec *record) read(r io.Reader) (err error) {
- if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
- return err
- }
- if rec.h.Version != 1 {
- return errors.New("fcgi: invalid header version")
- }
- n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
- if _, err = io.ReadFull(r, rec.buf[:n]); err != nil {
- return err
- }
- return nil
-}
-
-func (r *record) content() []byte {
- return r.buf[:r.h.ContentLength]
-}
-
-// writeRecord writes and sends a single record.
-func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error {
- c.mutex.Lock()
- defer c.mutex.Unlock()
- c.buf.Reset()
- c.h.init(recType, reqId, len(b))
- if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
- return err
- }
- if _, err := c.buf.Write(b); err != nil {
- return err
- }
- if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
- return err
- }
- _, err := c.rwc.Write(c.buf.Bytes())
- return err
-}
-
-func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
- b := [8]byte{byte(role >> 8), byte(role), flags}
- return c.writeRecord(typeBeginRequest, reqId, b[:])
-}
-
-func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
- b := make([]byte, 8)
- binary.BigEndian.PutUint32(b, uint32(appStatus))
- b[4] = protocolStatus
- return c.writeRecord(typeEndRequest, reqId, b)
-}
-
-func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error {
- w := newWriter(c, recType, reqId)
- b := make([]byte, 8)
- for k, v := range pairs {
- n := encodeSize(b, uint32(len(k)))
- n += encodeSize(b[n:], uint32(len(v)))
- if _, err := w.Write(b[:n]); err != nil {
- return err
- }
- if _, err := w.WriteString(k); err != nil {
- return err
- }
- if _, err := w.WriteString(v); err != nil {
- return err
- }
- }
- w.Close()
- return nil
-}
-
-func readSize(s []byte) (uint32, int) {
- if len(s) == 0 {
- return 0, 0
- }
- size, n := uint32(s[0]), 1
- if size&(1<<7) != 0 {
- if len(s) < 4 {
- return 0, 0
- }
- n = 4
- size = binary.BigEndian.Uint32(s)
- size &^= 1 << 31
- }
- return size, n
-}
-
-func readString(s []byte, size uint32) string {
- if size > uint32(len(s)) {
- return ""
- }
- return string(s[:size])
-}
-
-func encodeSize(b []byte, size uint32) int {
- if size > 127 {
- size |= 1 << 31
- binary.BigEndian.PutUint32(b, size)
- return 4
- }
- b[0] = byte(size)
- return 1
-}
-
-// bufWriter wraps a bufio.Writer and also closes the underlying
-// stream when it is closed.
-type bufWriter struct {
- closer io.Closer
- *bufio.Writer
-}
-
-func (w *bufWriter) Close() error {
- if err := w.Writer.Flush(); err != nil {
- w.closer.Close()
- return err
- }
- return w.closer.Close()
-}
-
-func newWriter(c *conn, recType recType, reqId uint16) *bufWriter {
- s := &streamWriter{c: c, recType: recType, reqId: reqId}
- w := bufio.NewWriterSize(s, maxWrite)
- return &bufWriter{s, w}
-}
-
-// streamWriter abstracts out the separation of a stream into discrete records.
-// It only writes maxWrite bytes at a time.
-type streamWriter struct {
- c *conn
- recType recType
- reqId uint16
-}
-
-func (w *streamWriter) Write(p []byte) (int, error) {
- nn := 0
- for len(p) > 0 {
- n := len(p)
- if n > maxWrite {
- n = maxWrite
- }
- if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil {
- return nn, err
- }
- nn += n
- p = p[n:]
- }
- return nn, nil
-}
-
-func (w *streamWriter) Close() error {
- // send empty record to close the stream
- return w.c.writeRecord(w.recType, w.reqId, nil)
-}
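encodeSize and readSize above implement the FastCGI name-value length encoding: lengths up to 127 take one byte, larger lengths take four big-endian bytes with the top bit set. A standalone sketch of the same rule; since those helpers are unexported, this re-derives the encoder rather than calling the package:

package main

import (
	"encoding/binary"
	"fmt"
)

// putSize mirrors encodeSize: one byte for small sizes, otherwise four
// big-endian bytes with the high bit set as a length-of-length marker.
func putSize(b []byte, size uint32) int {
	if size > 127 {
		binary.BigEndian.PutUint32(b, size|1<<31)
		return 4
	}
	b[0] = byte(size)
	return 1
}

func main() {
	b := make([]byte, 4)
	for _, size := range []uint32{0, 127, 128, 1000} {
		n := putSize(b, size)
		fmt.Printf("%4d -> % x\n", size, b[:n])
	}
	// Matches the deleted sizeTests table:
	//    0 -> 00
	//  127 -> 7f
	//  128 -> 80 00 00 80
	// 1000 -> 80 00 03 e8
}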
diff --git a/gcc-4.8.1/libgo/go/net/http/fcgi/fcgi_test.go b/gcc-4.8.1/libgo/go/net/http/fcgi/fcgi_test.go
deleted file mode 100644
index 6c7e1a9ce..000000000
--- a/gcc-4.8.1/libgo/go/net/http/fcgi/fcgi_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fcgi
-
-import (
- "bytes"
- "errors"
- "io"
- "testing"
-)
-
-var sizeTests = []struct {
- size uint32
- bytes []byte
-}{
- {0, []byte{0x00}},
- {127, []byte{0x7F}},
- {128, []byte{0x80, 0x00, 0x00, 0x80}},
- {1000, []byte{0x80, 0x00, 0x03, 0xE8}},
- {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}},
-}
-
-func TestSize(t *testing.T) {
- b := make([]byte, 4)
- for i, test := range sizeTests {
- n := encodeSize(b, test.size)
- if !bytes.Equal(b[:n], test.bytes) {
- t.Errorf("%d expected %x, encoded %x", i, test.bytes, b)
- }
- size, n := readSize(test.bytes)
- if size != test.size {
- t.Errorf("%d expected %d, read %d", i, test.size, size)
- }
- if len(test.bytes) != n {
- t.Errorf("%d did not consume all the bytes", i)
- }
- }
-}
-
-var streamTests = []struct {
- desc string
- recType recType
- reqId uint16
- content []byte
- raw []byte
-}{
- {"single record", typeStdout, 1, nil,
- []byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0},
- },
- // this data will have to be split into two records
- {"two records", typeStdin, 300, make([]byte, 66000),
- bytes.Join([][]byte{
- // header for the first record
- {1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0},
- make([]byte, 65536),
- // header for the second
- {1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0},
- make([]byte, 472),
- // header for the empty record
- {1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0},
- },
- nil),
- },
-}
-
-type nilCloser struct {
- io.ReadWriter
-}
-
-func (c *nilCloser) Close() error { return nil }
-
-func TestStreams(t *testing.T) {
- var rec record
-outer:
- for _, test := range streamTests {
- buf := bytes.NewBuffer(test.raw)
- var content []byte
- for buf.Len() > 0 {
- if err := rec.read(buf); err != nil {
- t.Errorf("%s: error reading record: %v", test.desc, err)
- continue outer
- }
- content = append(content, rec.content()...)
- }
- if rec.h.Type != test.recType {
- t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
- continue
- }
- if rec.h.Id != test.reqId {
- t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
- continue
- }
- if !bytes.Equal(content, test.content) {
- t.Errorf("%s: read wrong content", test.desc)
- continue
- }
- buf.Reset()
- c := newConn(&nilCloser{buf})
- w := newWriter(c, test.recType, test.reqId)
- if _, err := w.Write(test.content); err != nil {
- t.Errorf("%s: error writing record: %v", test.desc, err)
- continue
- }
- if err := w.Close(); err != nil {
- t.Errorf("%s: error closing stream: %v", test.desc, err)
- continue
- }
- if !bytes.Equal(buf.Bytes(), test.raw) {
- t.Errorf("%s: wrote wrong content", test.desc)
- }
- }
-}
-
-type writeOnlyConn struct {
- buf []byte
-}
-
-func (c *writeOnlyConn) Write(p []byte) (int, error) {
- c.buf = append(c.buf, p...)
- return len(p), nil
-}
-
-func (c *writeOnlyConn) Read(p []byte) (int, error) {
- return 0, errors.New("conn is write-only")
-}
-
-func (c *writeOnlyConn) Close() error {
- return nil
-}
-
-func TestGetValues(t *testing.T) {
- var rec record
- rec.h.Type = typeGetValues
-
- wc := new(writeOnlyConn)
- c := newChild(wc, nil)
- err := c.handleRecord(&rec)
- if err != nil {
- t.Fatalf("handleRecord: %v", err)
- }
-
- const want = "\x01\n\x00\x00\x00\x12\x06\x00" +
- "\x0f\x01FCGI_MPXS_CONNS1" +
- "\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00"
- if got := string(wc.buf); got != want {
- t.Errorf(" got: %q\nwant: %q\n", got, want)
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/filetransport.go b/gcc-4.8.1/libgo/go/net/http/filetransport.go
deleted file mode 100644
index 821787e0c..000000000
--- a/gcc-4.8.1/libgo/go/net/http/filetransport.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "fmt"
- "io"
-)
-
-// fileTransport implements RoundTripper for the 'file' protocol.
-type fileTransport struct {
- fh fileHandler
-}
-
-// NewFileTransport returns a new RoundTripper, serving the provided
-// FileSystem. The returned RoundTripper ignores the URL host in its
-// incoming requests, as well as most other properties of the
-// request.
-//
-// The typical use case for NewFileTransport is to register the "file"
-// protocol with a Transport, as in:
-//
-// t := &http.Transport{}
-// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
-// c := &http.Client{Transport: t}
-// res, err := c.Get("file:///etc/passwd")
-// ...
-func NewFileTransport(fs FileSystem) RoundTripper {
- return fileTransport{fileHandler{fs}}
-}
-
-func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
- // We start ServeHTTP in a goroutine, which may take a long
- // time if the file is large. The newPopulateResponseWriter
- // call returns a channel on which either ServeHTTP or finish()
- // sends our *Response, once the *Response itself has been
- // populated (even if the body itself is still being
- // written to res.Body, a pipe).
- rw, resc := newPopulateResponseWriter()
- go func() {
- t.fh.ServeHTTP(rw, req)
- rw.finish()
- }()
- return <-resc, nil
-}
-
-func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
- pr, pw := io.Pipe()
- rw := &populateResponse{
- ch: make(chan *Response),
- pw: pw,
- res: &Response{
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- Header: make(Header),
- Close: true,
- Body: pr,
- },
- }
- return rw, rw.ch
-}
-
-// populateResponse is a ResponseWriter that populates the *Response
-// in res, and writes its body to a pipe connected to the response
-// body. Once writes begin or finish() is called, the response is sent
-// on ch.
-type populateResponse struct {
- res *Response
- ch chan *Response
- wroteHeader bool
- hasContent bool
- sentResponse bool
- pw *io.PipeWriter
-}
-
-func (pr *populateResponse) finish() {
- if !pr.wroteHeader {
- pr.WriteHeader(500)
- }
- if !pr.sentResponse {
- pr.sendResponse()
- }
- pr.pw.Close()
-}
-
-func (pr *populateResponse) sendResponse() {
- if pr.sentResponse {
- return
- }
- pr.sentResponse = true
-
- if pr.hasContent {
- pr.res.ContentLength = -1
- }
- pr.ch <- pr.res
-}
-
-func (pr *populateResponse) Header() Header {
- return pr.res.Header
-}
-
-func (pr *populateResponse) WriteHeader(code int) {
- if pr.wroteHeader {
- return
- }
- pr.wroteHeader = true
-
- pr.res.StatusCode = code
- pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
-}
-
-func (pr *populateResponse) Write(p []byte) (n int, err error) {
- if !pr.wroteHeader {
- pr.WriteHeader(StatusOK)
- }
- pr.hasContent = true
- if !pr.sentResponse {
- pr.sendResponse()
- }
- return pr.pw.Write(p)
-}
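
The doc comment on NewFileTransport above already sketches the intended use; here is a slightly fuller, hypothetical example. The /tmp root and hello.txt file are assumptions for illustration, and note that RoundTrip never returns an error itself, so a missing file comes back as a 404 response rather than a Get error:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        t := &http.Transport{}
        t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/tmp")))
        c := &http.Client{Transport: t}

        // "file:///hello.txt" resolves to /tmp/hello.txt under the Dir above.
        res, err := c.Get("file:///hello.txt")
        if err != nil {
            fmt.Println("get:", err)
            return
        }
        defer res.Body.Close()
        b, _ := ioutil.ReadAll(res.Body)
        fmt.Printf("status=%s body=%q\n", res.Status, b)
    }
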
diff --git a/gcc-4.8.1/libgo/go/net/http/filetransport_test.go b/gcc-4.8.1/libgo/go/net/http/filetransport_test.go
deleted file mode 100644
index 039926b53..000000000
--- a/gcc-4.8.1/libgo/go/net/http/filetransport_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "io/ioutil"
- "net/http"
- "os"
- "path/filepath"
- "testing"
-)
-
-func checker(t *testing.T) func(string, error) {
- return func(call string, err error) {
- if err == nil {
- return
- }
- t.Fatalf("%s: %v", call, err)
- }
-}
-
-func TestFileTransport(t *testing.T) {
- check := checker(t)
-
- dname, err := ioutil.TempDir("", "")
- check("TempDir", err)
- fname := filepath.Join(dname, "foo.txt")
- err = ioutil.WriteFile(fname, []byte("Bar"), 0644)
- check("WriteFile", err)
- defer os.Remove(dname)
- defer os.Remove(fname)
-
- tr := &http.Transport{}
- tr.RegisterProtocol("file", http.NewFileTransport(http.Dir(dname)))
- c := &http.Client{Transport: tr}
-
- fooURLs := []string{"file:///foo.txt", "file://../foo.txt"}
- for _, urlstr := range fooURLs {
- res, err := c.Get(urlstr)
- check("Get "+urlstr, err)
- if res.StatusCode != 200 {
- t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode)
- }
- if res.ContentLength != -1 {
- t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength)
- }
- if res.Body == nil {
- t.Fatalf("for %s, nil Body", urlstr)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- check("ReadAll "+urlstr, err)
- if string(slurp) != "Bar" {
- t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar")
- }
- }
-
- const badURL = "file://../no-exist.txt"
- res, err := c.Get(badURL)
- check("Get "+badURL, err)
- if res.StatusCode != 404 {
- t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode)
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/fs.go b/gcc-4.8.1/libgo/go/net/http/fs.go
deleted file mode 100644
index b6bea0dfa..000000000
--- a/gcc-4.8.1/libgo/go/net/http/fs.go
+++ /dev/null
@@ -1,525 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP file system request handler
-
-package http
-
-import (
- "errors"
- "fmt"
- "io"
- "mime"
- "mime/multipart"
- "net/textproto"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-)
-
-// A Dir implements http.FileSystem using the native file
-// system restricted to a specific directory tree.
-//
-// An empty Dir is treated as ".".
-type Dir string
-
-func (d Dir) Open(name string) (File, error) {
- if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
- strings.Contains(name, "\x00") {
- return nil, errors.New("http: invalid character in file path")
- }
- dir := string(d)
- if dir == "" {
- dir = "."
- }
- f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
- if err != nil {
- return nil, err
- }
- return f, nil
-}
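
A point worth illustrating: because Open joins the cleaned, rooted name onto the Dir, ".." elements cannot escape the directory tree; they are neutralized rather than rejected. A hypothetical demonstration (the /var/www root is an assumed example):

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        d := http.Dir("/var/www") // the root directory here is only an example

        // path.Clean("/" + "../../etc/passwd") is "/etc/passwd", so this opens
        // /var/www/etc/passwd (most likely a "not exist" error), never the
        // real /etc/passwd.
        f, err := d.Open("../../etc/passwd")
        fmt.Println(f, err)
        if err == nil {
            f.Close()
        }
    }
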
-
-// A FileSystem implements access to a collection of named files.
-// The elements in a file path are separated by slash ('/', U+002F)
-// characters, regardless of host operating system convention.
-type FileSystem interface {
- Open(name string) (File, error)
-}
-
-// A File is returned by a FileSystem's Open method and can be
-// served by the FileServer implementation.
-type File interface {
- Close() error
- Stat() (os.FileInfo, error)
- Readdir(count int) ([]os.FileInfo, error)
- Read([]byte) (int, error)
- Seek(offset int64, whence int) (int64, error)
-}
-
-func dirList(w ResponseWriter, f File) {
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- fmt.Fprintf(w, "<pre>\n")
- for {
- dirs, err := f.Readdir(100)
- if err != nil || len(dirs) == 0 {
- break
- }
- for _, d := range dirs {
- name := d.Name()
- if d.IsDir() {
- name += "/"
- }
- // TODO htmlescape
- fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", name, name)
- }
- }
- fmt.Fprintf(w, "</pre>\n")
-}
-
-// ServeContent replies to the request using the content in the
-// provided ReadSeeker. The main benefit of ServeContent over io.Copy
-// is that it handles Range requests properly, sets the MIME type, and
-// handles If-Modified-Since requests.
-//
-// If the response's Content-Type header is not set, ServeContent
-// first tries to deduce the type from name's file extension and,
-// if that fails, falls back to reading the first block of the content
-// and passing it to DetectContentType.
-// The name is otherwise unused; in particular it can be empty and is
-// never sent in the response.
-//
-// If modtime is not the zero time, ServeContent includes it in a
-// Last-Modified header in the response. If the request includes an
-// If-Modified-Since header, ServeContent uses modtime to decide
-// whether the content needs to be sent at all.
-//
-// The content's Seek method must work: ServeContent uses
-// a seek to the end of the content to determine its size.
-//
-// If the caller has set w's ETag header, ServeContent uses it to
-// handle requests using If-Range and If-None-Match.
-//
-// Note that *os.File implements the io.ReadSeeker interface.
-func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) {
- size, err := content.Seek(0, os.SEEK_END)
- if err != nil {
- Error(w, "seeker can't seek", StatusInternalServerError)
- return
- }
- _, err = content.Seek(0, os.SEEK_SET)
- if err != nil {
- Error(w, "seeker can't seek", StatusInternalServerError)
- return
- }
- serveContent(w, req, name, modtime, size, content)
-}
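
A small, hypothetical handler showing the behavior described above: ServeContent serving an in-memory blob, with an ETag set beforehand so If-None-Match and If-Range are honored, and Range requests answered from the bytes.Reader. The URL path and listen address are arbitrary examples:

    package main

    import (
        "bytes"
        "net/http"
        "time"
    )

    func main() {
        data := []byte("hello, range requests")
        modtime := time.Now()

        http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
            // Setting an ETag beforehand lets ServeContent answer
            // If-None-Match and If-Range on our behalf.
            w.Header().Set("ETag", `"v1"`)
            http.ServeContent(w, r, "blob.txt", modtime, bytes.NewReader(data))
        })
        http.ListenAndServe("127.0.0.1:8080", nil) // address is an arbitrary example
    }
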
-
-// if name is empty, filename is unknown. (used for mime type, before sniffing)
-// if modtime.IsZero(), modtime is unknown.
-// content must be seeked to the beginning of the file.
-func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, size int64, content io.ReadSeeker) {
- if checkLastModified(w, r, modtime) {
- return
- }
- rangeReq, done := checkETag(w, r)
- if done {
- return
- }
-
- code := StatusOK
-
- // If Content-Type isn't set, use the file's extension to find it.
- ctype := w.Header().Get("Content-Type")
- if ctype == "" {
- ctype = mime.TypeByExtension(filepath.Ext(name))
- if ctype == "" {
- // read a chunk to decide between utf-8 text and binary
- var buf [1024]byte
- n, _ := io.ReadFull(content, buf[:])
- b := buf[:n]
- ctype = DetectContentType(b)
- _, err := content.Seek(0, os.SEEK_SET) // rewind to output whole file
- if err != nil {
- Error(w, "seeker can't seek", StatusInternalServerError)
- return
- }
- }
- w.Header().Set("Content-Type", ctype)
- }
-
- // handle Content-Range header.
- sendSize := size
- var sendContent io.Reader = content
- if size >= 0 {
- ranges, err := parseRange(rangeReq, size)
- if err != nil {
- Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
- return
- }
- if sumRangesSize(ranges) >= size {
- // The total number of bytes in all the ranges
- // is larger than the size of the file itself,
- // so this is probably an attack, or a dumb
- // client. Ignore the range request.
- ranges = nil
- }
- switch {
- case len(ranges) == 1:
- // RFC 2616, Section 14.16:
- // "When an HTTP message includes the content of a single
- // range (for example, a response to a request for a
- // single range, or to a request for a set of ranges
- // that overlap without any holes), this content is
- // transmitted with a Content-Range header, and a
- // Content-Length header showing the number of bytes
- // actually transferred.
- // ...
- // A response to a request for a single range MUST NOT
- // be sent using the multipart/byteranges media type."
- ra := ranges[0]
- if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
- Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
- return
- }
- sendSize = ra.length
- code = StatusPartialContent
- w.Header().Set("Content-Range", ra.contentRange(size))
- case len(ranges) > 1:
- for _, ra := range ranges {
- if ra.start > size {
- Error(w, "requested range not satisfiable", StatusRequestedRangeNotSatisfiable)
- return
- }
- }
- sendSize = rangesMIMESize(ranges, ctype, size)
- code = StatusPartialContent
-
- pr, pw := io.Pipe()
- mw := multipart.NewWriter(pw)
- w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
- sendContent = pr
- defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
- go func() {
- for _, ra := range ranges {
- part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
- if err != nil {
- pw.CloseWithError(err)
- return
- }
- if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
- pw.CloseWithError(err)
- return
- }
- if _, err := io.CopyN(part, content, ra.length); err != nil {
- pw.CloseWithError(err)
- return
- }
- }
- mw.Close()
- pw.Close()
- }()
- }
-
- w.Header().Set("Accept-Ranges", "bytes")
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
- }
- }
-
- w.WriteHeader(code)
-
- if r.Method != "HEAD" {
- io.CopyN(w, sendContent, sendSize)
- }
-}
-
-// modtime is the modification time of the resource to be served, or IsZero().
-// return value is whether this request is now complete.
-func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool {
- if modtime.IsZero() {
- return false
- }
-
- // The Last-Modified header truncates sub-second precision, so
- // use mtime < t+1s instead of mtime <= t to check for unmodified.
- if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) {
- h := w.Header()
- delete(h, "Content-Type")
- delete(h, "Content-Length")
- w.WriteHeader(StatusNotModified)
- return true
- }
- w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat))
- return false
-}
-
-// checkETag implements If-None-Match and If-Range checks.
-// The ETag must have been previously set in the ResponseWriter's headers.
-//
-// The return value is the effective request "Range" header to use and
-// whether this request is now considered done.
-func checkETag(w ResponseWriter, r *Request) (rangeReq string, done bool) {
- etag := w.Header().get("Etag")
- rangeReq = r.Header.get("Range")
-
- // Invalidate the range request if the entity doesn't match the one
- // the client was expecting.
- // "If-Range: version" means "ignore the Range: header unless version matches the
- // current file."
- // We only support ETag versions.
- // The caller must have set the ETag on the response already.
- if ir := r.Header.get("If-Range"); ir != "" && ir != etag {
- // TODO(bradfitz): handle If-Range requests with Last-Modified
- // times instead of ETags? I'd rather not, at least for
- // now. That seems like a bug/compromise in the RFC 2616, and
- // I've never heard of anybody caring about that (yet).
- rangeReq = ""
- }
-
- if inm := r.Header.get("If-None-Match"); inm != "" {
- // Must know ETag.
- if etag == "" {
- return rangeReq, false
- }
-
- // TODO(bradfitz): non-GET/HEAD requests require more work:
- // sending a different status code on matches, and
- // also can't use weak cache validators (those with a "W/
- // prefix). But most users of ServeContent will be using
- // it on GET or HEAD, so only support those for now.
- if r.Method != "GET" && r.Method != "HEAD" {
- return rangeReq, false
- }
-
- // TODO(bradfitz): deal with comma-separated or multiple-valued
- // list of If-None-match values. For now just handle the common
- // case of a single item.
- if inm == etag || inm == "*" {
- h := w.Header()
- delete(h, "Content-Type")
- delete(h, "Content-Length")
- w.WriteHeader(StatusNotModified)
- return "", true
- }
- }
- return rangeReq, false
-}
-
-// name is '/'-separated, not filepath.Separator.
-func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) {
- const indexPage = "/index.html"
-
- // redirect .../index.html to .../
- // can't use Redirect() because that would make the path absolute,
- // which would be a problem running under StripPrefix
- if strings.HasSuffix(r.URL.Path, indexPage) {
- localRedirect(w, r, "./")
- return
- }
-
- f, err := fs.Open(name)
- if err != nil {
- // TODO expose actual error?
- NotFound(w, r)
- return
- }
- defer f.Close()
-
- d, err1 := f.Stat()
- if err1 != nil {
- // TODO expose actual error?
- NotFound(w, r)
- return
- }
-
- if redirect {
- // redirect to canonical path: / at end of directory url
- // r.URL.Path always begins with /
- url := r.URL.Path
- if d.IsDir() {
- if url[len(url)-1] != '/' {
- localRedirect(w, r, path.Base(url)+"/")
- return
- }
- } else {
- if url[len(url)-1] == '/' {
- localRedirect(w, r, "../"+path.Base(url))
- return
- }
- }
- }
-
- // use contents of index.html for directory, if present
- if d.IsDir() {
- index := name + indexPage
- ff, err := fs.Open(index)
- if err == nil {
- defer ff.Close()
- dd, err := ff.Stat()
- if err == nil {
- name = index
- d = dd
- f = ff
- }
- }
- }
-
- // Still a directory? (we didn't find an index.html file)
- if d.IsDir() {
- if checkLastModified(w, r, d.ModTime()) {
- return
- }
- dirList(w, f)
- return
- }
-
- // serveContent will check modification time
- serveContent(w, r, d.Name(), d.ModTime(), d.Size(), f)
-}
-
-// localRedirect gives a Moved Permanently response.
-// It does not convert relative paths to absolute paths like Redirect does.
-func localRedirect(w ResponseWriter, r *Request, newPath string) {
- if q := r.URL.RawQuery; q != "" {
- newPath += "?" + q
- }
- w.Header().Set("Location", newPath)
- w.WriteHeader(StatusMovedPermanently)
-}
-
-// ServeFile replies to the request with the contents of the named file or directory.
-func ServeFile(w ResponseWriter, r *Request, name string) {
- dir, file := filepath.Split(name)
- serveFile(w, r, Dir(dir), file, false)
-}
-
-type fileHandler struct {
- root FileSystem
-}
-
-// FileServer returns a handler that serves HTTP requests
-// with the contents of the file system rooted at root.
-//
-// To use the operating system's file system implementation,
-// use http.Dir:
-//
-// http.Handle("/", http.FileServer(http.Dir("/tmp")))
-func FileServer(root FileSystem) Handler {
- return &fileHandler{root}
-}
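
A typical, hypothetical wiring of FileServer together with StripPrefix, as hinted at by the doc comment above; the ./static directory and /assets/ prefix are assumptions for illustration:

    package main

    import "net/http"

    func main() {
        // Serve ./static under the /assets/ URL prefix, e.g.
        // GET /assets/app.css reads ./static/app.css.
        fs := http.FileServer(http.Dir("static"))
        http.Handle("/assets/", http.StripPrefix("/assets/", fs))
        http.ListenAndServe("127.0.0.1:8080", nil) // address is an arbitrary example
    }
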
-
-func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
- upath := r.URL.Path
- if !strings.HasPrefix(upath, "/") {
- upath = "/" + upath
- r.URL.Path = upath
- }
- serveFile(w, r, f.root, path.Clean(upath), true)
-}
-
-// httpRange specifies the byte range to be sent to the client.
-type httpRange struct {
- start, length int64
-}
-
-func (r httpRange) contentRange(size int64) string {
- return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
-}
-
-func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
- return textproto.MIMEHeader{
- "Content-Range": {r.contentRange(size)},
- "Content-Type": {contentType},
- }
-}
-
-// parseRange parses a Range header string as per RFC 2616.
-func parseRange(s string, size int64) ([]httpRange, error) {
- if s == "" {
- return nil, nil // header not present
- }
- const b = "bytes="
- if !strings.HasPrefix(s, b) {
- return nil, errors.New("invalid range")
- }
- var ranges []httpRange
- for _, ra := range strings.Split(s[len(b):], ",") {
- ra = strings.TrimSpace(ra)
- if ra == "" {
- continue
- }
- i := strings.Index(ra, "-")
- if i < 0 {
- return nil, errors.New("invalid range")
- }
- start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
- var r httpRange
- if start == "" {
- // If no start is specified, end specifies the
- // range start relative to the end of the file.
- i, err := strconv.ParseInt(end, 10, 64)
- if err != nil {
- return nil, errors.New("invalid range")
- }
- if i > size {
- i = size
- }
- r.start = size - i
- r.length = size - r.start
- } else {
- i, err := strconv.ParseInt(start, 10, 64)
- if err != nil || i > size || i < 0 {
- return nil, errors.New("invalid range")
- }
- r.start = i
- if end == "" {
- // If no end is specified, range extends to end of the file.
- r.length = size - r.start
- } else {
- i, err := strconv.ParseInt(end, 10, 64)
- if err != nil || r.start > i {
- return nil, errors.New("invalid range")
- }
- if i >= size {
- i = size - 1
- }
- r.length = i - r.start + 1
- }
- }
- ranges = append(ranges, r)
- }
- return ranges, nil
-}
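
As a worked example of these rules, consider an 11-byte resource. The sketch below is hypothetical and would have to live inside package http, since parseRange and httpRange are unexported; the expected values follow directly from the code above:

    package http

    import (
        "reflect"
        "testing"
    )

    func TestParseRangeExample(t *testing.T) {
        got, err := parseRange("bytes=0-4,-3", 11)
        if err != nil {
            t.Fatal(err)
        }
        // "0-4" is bytes 0 through 4 (length 5); "-3" is the final 3 bytes
        // of the 11-byte resource, i.e. start 8, length 3.
        want := []httpRange{{start: 0, length: 5}, {start: 8, length: 3}}
        if !reflect.DeepEqual(got, want) {
            t.Errorf("parseRange = %+v, want %+v", got, want)
        }
    }
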
-
-// countingWriter counts how many bytes have been written to it.
-type countingWriter int64
-
-func (w *countingWriter) Write(p []byte) (n int, err error) {
- *w += countingWriter(len(p))
- return len(p), nil
-}
-
-// rangesMIMESize returns the number of bytes it takes to encode the
-// provided ranges as a multipart response.
-func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
- var w countingWriter
- mw := multipart.NewWriter(&w)
- for _, ra := range ranges {
- mw.CreatePart(ra.mimeHeader(contentType, contentSize))
- encSize += ra.length
- }
- mw.Close()
- encSize += int64(w)
- return
-}
-
-func sumRangesSize(ranges []httpRange) (size int64) {
- for _, ra := range ranges {
- size += ra.length
- }
- return
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/fs_test.go b/gcc-4.8.1/libgo/go/net/http/fs_test.go
deleted file mode 100644
index d42014c26..000000000
--- a/gcc-4.8.1/libgo/go/net/http/fs_test.go
+++ /dev/null
@@ -1,749 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "mime/multipart"
- "net"
- . "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "regexp"
- "runtime"
- "strings"
- "testing"
- "time"
-)
-
-const (
- testFile = "testdata/file"
- testFileLen = 11
-)
-
-type wantRange struct {
- start, end int64 // range [start,end)
-}
-
-var ServeFileRangeTests = []struct {
- r string
- code int
- ranges []wantRange
-}{
- {r: "", code: StatusOK},
- {r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
- {r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
- {r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
- {r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
- {r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable},
- {r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
- {r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
- {r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
- {r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
-}
-
-func TestServeFile(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ServeFile(w, r, "testdata/file")
- }))
- defer ts.Close()
-
- var err error
-
- file, err := ioutil.ReadFile(testFile)
- if err != nil {
- t.Fatal("reading file:", err)
- }
-
- // set up the Request (re-used for all tests)
- var req Request
- req.Header = make(Header)
- if req.URL, err = url.Parse(ts.URL); err != nil {
- t.Fatal("ParseURL:", err)
- }
- req.Method = "GET"
-
- // straight GET
- _, body := getBody(t, "straight get", req)
- if !bytes.Equal(body, file) {
- t.Fatalf("body mismatch: got %q, want %q", body, file)
- }
-
- // Range tests
-Cases:
- for _, rt := range ServeFileRangeTests {
- if rt.r != "" {
- req.Header.Set("Range", rt.r)
- }
- resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
- if resp.StatusCode != rt.code {
- t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
- }
- if rt.code == StatusRequestedRangeNotSatisfiable {
- continue
- }
- wantContentRange := ""
- if len(rt.ranges) == 1 {
- rng := rt.ranges[0]
- wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
- }
- cr := resp.Header.Get("Content-Range")
- if cr != wantContentRange {
- t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
- }
- ct := resp.Header.Get("Content-Type")
- if len(rt.ranges) == 1 {
- rng := rt.ranges[0]
- wantBody := file[rng.start:rng.end]
- if !bytes.Equal(body, wantBody) {
- t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
- }
- if strings.HasPrefix(ct, "multipart/byteranges") {
- t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
- }
- }
- if len(rt.ranges) > 1 {
- typ, params, err := mime.ParseMediaType(ct)
- if err != nil {
- t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
- continue
- }
- if typ != "multipart/byteranges" {
- t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
- continue
- }
- if params["boundary"] == "" {
- t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
- continue
- }
- if g, w := resp.ContentLength, int64(len(body)); g != w {
- t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
- continue
- }
- mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
- for ri, rng := range rt.ranges {
- part, err := mr.NextPart()
- if err != nil {
- t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
- continue Cases
- }
- wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
- if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
- t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
- }
- body, err := ioutil.ReadAll(part)
- if err != nil {
- t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
- continue Cases
- }
- wantBody := file[rng.start:rng.end]
- if !bytes.Equal(body, wantBody) {
- t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
- }
- }
- _, err = mr.NextPart()
- if err != io.EOF {
- t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
- }
- }
- }
-}
-
-var fsRedirectTestData = []struct {
- original, redirect string
-}{
- {"/test/index.html", "/test/"},
- {"/test/testdata", "/test/testdata/"},
- {"/test/testdata/file/", "/test/testdata/file"},
-}
-
-func TestFSRedirect(t *testing.T) {
- ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
- defer ts.Close()
-
- for _, data := range fsRedirectTestData {
- res, err := Get(ts.URL + data.original)
- if err != nil {
- t.Fatal(err)
- }
- res.Body.Close()
- if g, e := res.Request.URL.Path, data.redirect; g != e {
- t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
- }
- }
-}
-
-type testFileSystem struct {
- open func(name string) (File, error)
-}
-
-func (fs *testFileSystem) Open(name string) (File, error) {
- return fs.open(name)
-}
-
-func TestFileServerCleans(t *testing.T) {
- ch := make(chan string, 1)
- fs := FileServer(&testFileSystem{func(name string) (File, error) {
- ch <- name
- return nil, errors.New("file does not exist")
- }})
- tests := []struct {
- reqPath, openArg string
- }{
- {"/foo.txt", "/foo.txt"},
- {"//foo.txt", "/foo.txt"},
- {"/../foo.txt", "/foo.txt"},
- }
- req, _ := NewRequest("GET", "http://example.com", nil)
- for n, test := range tests {
- rec := httptest.NewRecorder()
- req.URL.Path = test.reqPath
- fs.ServeHTTP(rec, req)
- if got := <-ch; got != test.openArg {
- t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
- }
- }
-}
-
-func mustRemoveAll(dir string) {
- err := os.RemoveAll(dir)
- if err != nil {
- panic(err)
- }
-}
-
-func TestFileServerImplicitLeadingSlash(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("TempDir: %v", err)
- }
- defer mustRemoveAll(tempDir)
- if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
- t.Fatalf("WriteFile: %v", err)
- }
- ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
- defer ts.Close()
- get := func(suffix string) string {
- res, err := Get(ts.URL + suffix)
- if err != nil {
- t.Fatalf("Get %s: %v", suffix, err)
- }
- b, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("ReadAll %s: %v", suffix, err)
- }
- res.Body.Close()
- return string(b)
- }
- if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
- t.Logf("expected a directory listing with foo.txt, got %q", s)
- }
- if s := get("/bar/foo.txt"); s != "Hello world" {
- t.Logf("expected %q, got %q", "Hello world", s)
- }
-}
-
-func TestDirJoin(t *testing.T) {
- wfi, err := os.Stat("/etc/hosts")
- if err != nil {
- t.Skip("skipping test; no /etc/hosts file")
- }
- test := func(d Dir, name string) {
- f, err := d.Open(name)
- if err != nil {
- t.Fatalf("open of %s: %v", name, err)
- }
- defer f.Close()
- gfi, err := f.Stat()
- if err != nil {
- t.Fatalf("stat of %s: %v", name, err)
- }
- if !os.SameFile(gfi, wfi) {
- t.Errorf("%s got different file", name)
- }
- }
- test(Dir("/etc/"), "/hosts")
- test(Dir("/etc/"), "hosts")
- test(Dir("/etc/"), "../../../../hosts")
- test(Dir("/etc"), "/hosts")
- test(Dir("/etc"), "hosts")
- test(Dir("/etc"), "../../../../hosts")
-
- // Not really directories, but since we use this trick in
- // ServeFile, test it:
- test(Dir("/etc/hosts"), "")
- test(Dir("/etc/hosts"), "/")
- test(Dir("/etc/hosts"), "../")
-}
-
-func TestEmptyDirOpenCWD(t *testing.T) {
- test := func(d Dir) {
- name := "fs_test.go"
- f, err := d.Open(name)
- if err != nil {
- t.Fatalf("open of %s: %v", name, err)
- }
- defer f.Close()
- }
- test(Dir(""))
- test(Dir("."))
- test(Dir("./"))
-}
-
-func TestServeFileContentType(t *testing.T) {
- const ctype = "icecream/chocolate"
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.FormValue("override") == "1" {
- w.Header().Set("Content-Type", ctype)
- }
- ServeFile(w, r, "testdata/file")
- }))
- defer ts.Close()
- get := func(override, want string) {
- resp, err := Get(ts.URL + "?override=" + override)
- if err != nil {
- t.Fatal(err)
- }
- if h := resp.Header.Get("Content-Type"); h != want {
- t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
- }
- }
- get("0", "text/plain; charset=utf-8")
- get("1", ctype)
-}
-
-func TestServeFileMimeType(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ServeFile(w, r, "testdata/style.css")
- }))
- defer ts.Close()
- resp, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- want := "text/css; charset=utf-8"
- if h := resp.Header.Get("Content-Type"); h != want {
- t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
- }
-}
-
-func TestServeFileFromCWD(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ServeFile(w, r, "fs_test.go")
- }))
- defer ts.Close()
- r, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- r.Body.Close()
- if r.StatusCode != 200 {
- t.Fatalf("expected 200 OK, got %s", r.Status)
- }
-}
-
-func TestServeFileWithContentEncoding(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Encoding", "foo")
- ServeFile(w, r, "testdata/file")
- }))
- defer ts.Close()
- resp, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if g, e := resp.ContentLength, int64(-1); g != e {
- t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
- }
-}
-
-func TestServeIndexHtml(t *testing.T) {
- const want = "index.html says hello\n"
- ts := httptest.NewServer(FileServer(Dir(".")))
- defer ts.Close()
-
- for _, path := range []string{"/testdata/", "/testdata/index.html"} {
- res, err := Get(ts.URL + path)
- if err != nil {
- t.Fatal(err)
- }
- b, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal("reading Body:", err)
- }
- if s := string(b); s != want {
- t.Errorf("for path %q got %q, want %q", path, s, want)
- }
- res.Body.Close()
- }
-}
-
-func TestFileServerZeroByte(t *testing.T) {
- ts := httptest.NewServer(FileServer(Dir(".")))
- defer ts.Close()
-
- res, err := Get(ts.URL + "/..\x00")
- if err != nil {
- t.Fatal(err)
- }
- b, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal("reading Body:", err)
- }
- if res.StatusCode == 200 {
- t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
- }
-}
-
-type fakeFileInfo struct {
- dir bool
- basename string
- modtime time.Time
- ents []*fakeFileInfo
- contents string
-}
-
-func (f *fakeFileInfo) Name() string { return f.basename }
-func (f *fakeFileInfo) Sys() interface{} { return nil }
-func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
-func (f *fakeFileInfo) IsDir() bool { return f.dir }
-func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
-func (f *fakeFileInfo) Mode() os.FileMode {
- if f.dir {
- return 0755 | os.ModeDir
- }
- return 0644
-}
-
-type fakeFile struct {
- io.ReadSeeker
- fi *fakeFileInfo
- path string // as opened
-}
-
-func (f *fakeFile) Close() error { return nil }
-func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
-func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
- if !f.fi.dir {
- return nil, os.ErrInvalid
- }
- var fis []os.FileInfo
- for _, fi := range f.fi.ents {
- fis = append(fis, fi)
- }
- return fis, nil
-}
-
-type fakeFS map[string]*fakeFileInfo
-
-func (fs fakeFS) Open(name string) (File, error) {
- name = path.Clean(name)
- f, ok := fs[name]
- if !ok {
- println("fake filesystem didn't find file", name)
- return nil, os.ErrNotExist
- }
- return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
-}
-
-func TestDirectoryIfNotModified(t *testing.T) {
- const indexContents = "I am a fake index.html file"
- fileMod := time.Unix(1000000000, 0).UTC()
- fileModStr := fileMod.Format(TimeFormat)
- dirMod := time.Unix(123, 0).UTC()
- indexFile := &fakeFileInfo{
- basename: "index.html",
- modtime: fileMod,
- contents: indexContents,
- }
- fs := fakeFS{
- "/": &fakeFileInfo{
- dir: true,
- modtime: dirMod,
- ents: []*fakeFileInfo{indexFile},
- },
- "/index.html": indexFile,
- }
-
- ts := httptest.NewServer(FileServer(fs))
- defer ts.Close()
-
- res, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- b, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if string(b) != indexContents {
- t.Fatalf("Got body %q; want %q", b, indexContents)
- }
- res.Body.Close()
-
- lastMod := res.Header.Get("Last-Modified")
- if lastMod != fileModStr {
- t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
- }
-
- req, _ := NewRequest("GET", ts.URL, nil)
- req.Header.Set("If-Modified-Since", lastMod)
-
- res, err = DefaultClient.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- if res.StatusCode != 304 {
- t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
- }
- res.Body.Close()
-
- // Advance the index.html file's modtime, but not the directory's.
- indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
-
- res, err = DefaultClient.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- if res.StatusCode != 200 {
- t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
- }
- res.Body.Close()
-}
-
-func mustStat(t *testing.T, fileName string) os.FileInfo {
- fi, err := os.Stat(fileName)
- if err != nil {
- t.Fatal(err)
- }
- return fi
-}
-
-func TestServeContent(t *testing.T) {
- type serveParam struct {
- name string
- modtime time.Time
- content io.ReadSeeker
- contentType string
- etag string
- }
- servec := make(chan serveParam, 1)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- p := <-servec
- if p.etag != "" {
- w.Header().Set("ETag", p.etag)
- }
- if p.contentType != "" {
- w.Header().Set("Content-Type", p.contentType)
- }
- ServeContent(w, r, p.name, p.modtime, p.content)
- }))
- defer ts.Close()
-
- type testCase struct {
- file string
- modtime time.Time
- serveETag string // optional
- serveContentType string // optional
- reqHeader map[string]string
- wantLastMod string
- wantContentType string
- wantStatus int
- }
- htmlModTime := mustStat(t, "testdata/index.html").ModTime()
- tests := map[string]testCase{
- "no_last_modified": {
- file: "testdata/style.css",
- wantContentType: "text/css; charset=utf-8",
- wantStatus: 200,
- },
- "with_last_modified": {
- file: "testdata/index.html",
- wantContentType: "text/html; charset=utf-8",
- modtime: htmlModTime,
- wantLastMod: htmlModTime.UTC().Format(TimeFormat),
- wantStatus: 200,
- },
- "not_modified_modtime": {
- file: "testdata/style.css",
- modtime: htmlModTime,
- reqHeader: map[string]string{
- "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
- },
- wantStatus: 304,
- },
- "not_modified_modtime_with_contenttype": {
- file: "testdata/style.css",
- serveContentType: "text/css", // explicit content type
- modtime: htmlModTime,
- reqHeader: map[string]string{
- "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
- },
- wantStatus: 304,
- },
- "not_modified_etag": {
- file: "testdata/style.css",
- serveETag: `"foo"`,
- reqHeader: map[string]string{
- "If-None-Match": `"foo"`,
- },
- wantStatus: 304,
- },
- "range_good": {
- file: "testdata/style.css",
- serveETag: `"A"`,
- reqHeader: map[string]string{
- "Range": "bytes=0-4",
- },
- wantStatus: StatusPartialContent,
- wantContentType: "text/css; charset=utf-8",
- },
- // An If-Range resource for entity "A", but entity "B" is now current.
- // The Range request should be ignored.
- "range_no_match": {
- file: "testdata/style.css",
- serveETag: `"A"`,
- reqHeader: map[string]string{
- "Range": "bytes=0-4",
- "If-Range": `"B"`,
- },
- wantStatus: 200,
- wantContentType: "text/css; charset=utf-8",
- },
- }
- for testName, tt := range tests {
- f, err := os.Open(tt.file)
- if err != nil {
- t.Fatalf("test %q: %v", testName, err)
- }
- defer f.Close()
-
- servec <- serveParam{
- name: filepath.Base(tt.file),
- content: f,
- modtime: tt.modtime,
- etag: tt.serveETag,
- contentType: tt.serveContentType,
- }
- req, err := NewRequest("GET", ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- for k, v := range tt.reqHeader {
- req.Header.Set(k, v)
- }
- res, err := DefaultClient.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- io.Copy(ioutil.Discard, res.Body)
- res.Body.Close()
- if res.StatusCode != tt.wantStatus {
- t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
- }
- if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
- t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
- }
- if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
- t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
- }
- }
-}
-
-// verifies that sendfile is being used on Linux
-func TestLinuxSendfile(t *testing.T) {
- if runtime.GOOS != "linux" {
- t.Skip("skipping; linux-only test")
- }
- if _, err := exec.LookPath("strace"); err != nil {
- t.Skip("skipping; strace not found in path")
- }
-
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatal(err)
- }
- lnf, err := ln.(*net.TCPListener).File()
- if err != nil {
- t.Fatal(err)
- }
- defer ln.Close()
-
- var buf bytes.Buffer
- child := exec.Command("strace", "-f", "-e!sigaltstack", os.Args[0], "-test.run=TestLinuxSendfileChild")
- child.ExtraFiles = append(child.ExtraFiles, lnf)
- child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
- child.Stdout = &buf
- child.Stderr = &buf
- if err := child.Start(); err != nil {
- t.Skipf("skipping; failed to start straced child: %v", err)
- }
-
- res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
- if err != nil {
- t.Fatalf("http client error: %v", err)
- }
- _, err = io.Copy(ioutil.Discard, res.Body)
- if err != nil {
- t.Fatalf("client body read error: %v", err)
- }
- res.Body.Close()
-
- // Force child to exit cleanly.
- Get(fmt.Sprintf("http://%s/quit", ln.Addr()))
- child.Wait()
-
- rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`)
- rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`)
- out := buf.String()
- if !rx.MatchString(out) && !rxResume.MatchString(out) {
- t.Errorf("no sendfile system call found in:\n%s", out)
- }
-}
-
-func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
- r, err := DefaultClient.Do(&req)
- if err != nil {
- t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
- }
- return r, b
-}
-
-// TestLinuxSendfileChild isn't a real test. It's used as a helper process
-// for TestLinuxSendfile.
-func TestLinuxSendfileChild(*testing.T) {
- if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
- return
- }
- defer os.Exit(0)
- fd3 := os.NewFile(3, "ephemeral-port-listener")
- ln, err := net.FileListener(fd3)
- if err != nil {
- panic(err)
- }
- mux := NewServeMux()
- mux.Handle("/", FileServer(Dir("testdata")))
- mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
- os.Exit(0)
- })
- s := &Server{Handler: mux}
- err = s.Serve(ln)
- if err != nil {
- panic(err)
- }
-}
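
The GO_WANT_HELPER_PROCESS re-exec trick used by TestLinuxSendfile above is a general pattern for tests that need a separate child process. A stripped-down, hypothetical sketch (the package and test names are illustrative):

    package mypkg_test

    import (
        "os"
        "os/exec"
        "testing"
    )

    func TestWithHelper(t *testing.T) {
        // Re-run this test binary, asking it to run only the helper "test".
        cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess")
        cmd.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
        out, err := cmd.CombinedOutput()
        if err != nil {
            t.Fatalf("helper process failed: %v\n%s", err, out)
        }
    }

    // TestHelperProcess is not a real test; it only does work when re-executed
    // by TestWithHelper with GO_WANT_HELPER_PROCESS=1 in its environment.
    func TestHelperProcess(t *testing.T) {
        if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
            return
        }
        defer os.Exit(0)
        // ... behave as the child process here ...
    }
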
diff --git a/gcc-4.8.1/libgo/go/net/http/header.go b/gcc-4.8.1/libgo/go/net/http/header.go
deleted file mode 100644
index f479b7b4e..000000000
--- a/gcc-4.8.1/libgo/go/net/http/header.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "io"
- "net/textproto"
- "sort"
- "strings"
- "time"
-)
-
-// A Header represents the key-value pairs in an HTTP header.
-type Header map[string][]string
-
-// Add adds the key, value pair to the header.
-// It appends to any existing values associated with key.
-func (h Header) Add(key, value string) {
- textproto.MIMEHeader(h).Add(key, value)
-}
-
-// Set sets the header entries associated with key to
-// the single element value. It replaces any existing
-// values associated with key.
-func (h Header) Set(key, value string) {
- textproto.MIMEHeader(h).Set(key, value)
-}
-
-// Get gets the first value associated with the given key.
-// If there are no values associated with the key, Get returns "".
-// To access multiple values of a key, access the map directly
-// with CanonicalHeaderKey.
-func (h Header) Get(key string) string {
- return textproto.MIMEHeader(h).Get(key)
-}
-
-// get is like Get, but key must already be in CanonicalHeaderKey form.
-func (h Header) get(key string) string {
- if v := h[key]; len(v) > 0 {
- return v[0]
- }
- return ""
-}
-
-// Del deletes the values associated with key.
-func (h Header) Del(key string) {
- textproto.MIMEHeader(h).Del(key)
-}
-
-// Write writes a header in wire format.
-func (h Header) Write(w io.Writer) error {
- return h.WriteSubset(w, nil)
-}
-
-func (h Header) clone() Header {
- h2 := make(Header, len(h))
- for k, vv := range h {
- vv2 := make([]string, len(vv))
- copy(vv2, vv)
- h2[k] = vv2
- }
- return h2
-}
-
-var timeFormats = []string{
- TimeFormat,
- time.RFC850,
- time.ANSIC,
-}
-
-// ParseTime parses a time header (such as the Date: header),
-// trying each of the three formats allowed by HTTP/1.1:
-// TimeFormat, time.RFC850, and time.ANSIC.
-func ParseTime(text string) (t time.Time, err error) {
- for _, layout := range timeFormats {
- t, err = time.Parse(layout, text)
- if err == nil {
- return
- }
- }
- return
-}
-
-var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
-
-type writeStringer interface {
- WriteString(string) (int, error)
-}
-
-// stringWriter implements WriteString on a Writer.
-type stringWriter struct {
- w io.Writer
-}
-
-func (w stringWriter) WriteString(s string) (n int, err error) {
- return w.w.Write([]byte(s))
-}
-
-type keyValues struct {
- key string
- values []string
-}
-
-type byKey []keyValues
-
-func (s byKey) Len() int { return len(s) }
-func (s byKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byKey) Less(i, j int) bool { return s[i].key < s[j].key }
-
-func (h Header) sortedKeyValues(exclude map[string]bool) []keyValues {
- kvs := make([]keyValues, 0, len(h))
- for k, vv := range h {
- if !exclude[k] {
- kvs = append(kvs, keyValues{k, vv})
- }
- }
- sort.Sort(byKey(kvs))
- return kvs
-}
-
-// WriteSubset writes a header in wire format.
-// If exclude is not nil, keys where exclude[key] == true are not written.
-func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
- ws, ok := w.(writeStringer)
- if !ok {
- ws = stringWriter{w}
- }
- for _, kv := range h.sortedKeyValues(exclude) {
- for _, v := range kv.values {
- v = headerNewlineToSpace.Replace(v)
- v = textproto.TrimString(v)
- for _, s := range []string{kv.key, ": ", v, "\r\n"} {
- if _, err := ws.WriteString(s); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-// CanonicalHeaderKey returns the canonical format of the
-// header key s. The canonicalization converts the first
-// letter and any letter following a hyphen to upper case;
-// the rest are converted to lowercase. For example, the
-// canonical key for "accept-encoding" is "Accept-Encoding".
-func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
-
-// hasToken returns whether token appears within v, ASCII
-// case-insensitive, with space or comma boundaries.
-// token must be all lowercase.
-// v may contain mixed case.
-func hasToken(v, token string) bool {
- if len(token) > len(v) || token == "" {
- return false
- }
- if v == token {
- return true
- }
- for sp := 0; sp <= len(v)-len(token); sp++ {
- // Check that first character is good.
- // The token is ASCII, so checking only a single byte
- // is sufficient. We skip this potential starting
- // position if both the first byte and its potential
- // ASCII uppercase equivalent (b|0x20) don't match.
- // False positives ('^' => '~') are caught by EqualFold.
- if b := v[sp]; b != token[0] && b|0x20 != token[0] {
- continue
- }
- // Check that start pos is on a valid token boundary.
- if sp > 0 && !isTokenBoundary(v[sp-1]) {
- continue
- }
- // Check that end pos is on a valid token boundary.
- if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) {
- continue
- }
- if strings.EqualFold(v[sp:sp+len(token)], token) {
- return true
- }
- }
- return false
-}
-
-func isTokenBoundary(b byte) bool {
- return b == ' ' || b == ',' || b == '\t'
-}
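
A short, hypothetical example tying the Header methods above together: Add appends, Set replaces, Get canonicalizes the key and returns the first value, and WriteSubset emits the sorted wire format while skipping excluded keys:

    package main

    import (
        "net/http"
        "os"
    )

    func main() {
        h := make(http.Header)
        h.Add("Accept", "text/html")
        h.Add("Accept", "application/json") // appends a second value for the same key
        h.Set("User-Agent", "example/1.0")  // replaces any existing values

        _ = h.Get("accept") // "text/html": key is canonicalized, first value returned

        // Wire format, sorted by key, excluding User-Agent:
        //   Accept: text/html\r\n
        //   Accept: application/json\r\n
        h.WriteSubset(os.Stdout, map[string]bool{"User-Agent": true})
    }
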
diff --git a/gcc-4.8.1/libgo/go/net/http/header_test.go b/gcc-4.8.1/libgo/go/net/http/header_test.go
deleted file mode 100644
index 01bb4dce0..000000000
--- a/gcc-4.8.1/libgo/go/net/http/header_test.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "runtime"
- "testing"
- "time"
-)
-
-var headerWriteTests = []struct {
- h Header
- exclude map[string]bool
- expected string
-}{
- {Header{}, nil, ""},
- {
- Header{
- "Content-Type": {"text/html; charset=UTF-8"},
- "Content-Length": {"0"},
- },
- nil,
- "Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n",
- },
- {
- Header{
- "Content-Length": {"0", "1", "2"},
- },
- nil,
- "Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n",
- },
- {
- Header{
- "Expires": {"-1"},
- "Content-Length": {"0"},
- "Content-Encoding": {"gzip"},
- },
- map[string]bool{"Content-Length": true},
- "Content-Encoding: gzip\r\nExpires: -1\r\n",
- },
- {
- Header{
- "Expires": {"-1"},
- "Content-Length": {"0", "1", "2"},
- "Content-Encoding": {"gzip"},
- },
- map[string]bool{"Content-Length": true},
- "Content-Encoding: gzip\r\nExpires: -1\r\n",
- },
- {
- Header{
- "Expires": {"-1"},
- "Content-Length": {"0"},
- "Content-Encoding": {"gzip"},
- },
- map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true},
- "",
- },
- {
- Header{
- "Nil": nil,
- "Empty": {},
- "Blank": {""},
- "Double-Blank": {"", ""},
- },
- nil,
- "Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n",
- },
- // Tests header sorting when over the insertion sort threshold size:
- {
- Header{
- "k1": {"1a", "1b"},
- "k2": {"2a", "2b"},
- "k3": {"3a", "3b"},
- "k4": {"4a", "4b"},
- "k5": {"5a", "5b"},
- "k6": {"6a", "6b"},
- "k7": {"7a", "7b"},
- "k8": {"8a", "8b"},
- "k9": {"9a", "9b"},
- },
- map[string]bool{"k5": true},
- "k1: 1a\r\nk1: 1b\r\nk2: 2a\r\nk2: 2b\r\nk3: 3a\r\nk3: 3b\r\n" +
- "k4: 4a\r\nk4: 4b\r\nk6: 6a\r\nk6: 6b\r\n" +
- "k7: 7a\r\nk7: 7b\r\nk8: 8a\r\nk8: 8b\r\nk9: 9a\r\nk9: 9b\r\n",
- },
-}
-
-func TestHeaderWrite(t *testing.T) {
- var buf bytes.Buffer
- for i, test := range headerWriteTests {
- test.h.WriteSubset(&buf, test.exclude)
- if buf.String() != test.expected {
- t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected)
- }
- buf.Reset()
- }
-}
-
-var parseTimeTests = []struct {
- h Header
- err bool
-}{
- {Header{"Date": {""}}, true},
- {Header{"Date": {"invalid"}}, true},
- {Header{"Date": {"1994-11-06T08:49:37Z00:00"}}, true},
- {Header{"Date": {"Sun, 06 Nov 1994 08:49:37 GMT"}}, false},
- {Header{"Date": {"Sunday, 06-Nov-94 08:49:37 GMT"}}, false},
- {Header{"Date": {"Sun Nov 6 08:49:37 1994"}}, false},
-}
-
-func TestParseTime(t *testing.T) {
- expect := time.Date(1994, 11, 6, 8, 49, 37, 0, time.UTC)
- for i, test := range parseTimeTests {
- d, err := ParseTime(test.h.Get("Date"))
- if err != nil {
- if !test.err {
- t.Errorf("#%d:\n got err: %v", i, err)
- }
- continue
- }
- if test.err {
- t.Errorf("#%d:\n should err", i)
- continue
- }
- if !expect.Equal(d) {
- t.Errorf("#%d:\n got: %v\nwant: %v", i, d, expect)
- }
- }
-}
-
-type hasTokenTest struct {
- header string
- token string
- want bool
-}
-
-var hasTokenTests = []hasTokenTest{
- {"", "", false},
- {"", "foo", false},
- {"foo", "foo", true},
- {"foo ", "foo", true},
- {" foo", "foo", true},
- {" foo ", "foo", true},
- {"foo,bar", "foo", true},
- {"bar,foo", "foo", true},
- {"bar, foo", "foo", true},
- {"bar,foo, baz", "foo", true},
- {"bar, foo,baz", "foo", true},
- {"bar,foo, baz", "foo", true},
- {"bar, foo, baz", "foo", true},
- {"FOO", "foo", true},
- {"FOO ", "foo", true},
- {" FOO", "foo", true},
- {" FOO ", "foo", true},
- {"FOO,BAR", "foo", true},
- {"BAR,FOO", "foo", true},
- {"BAR, FOO", "foo", true},
- {"BAR,FOO, baz", "foo", true},
- {"BAR, FOO,BAZ", "foo", true},
- {"BAR,FOO, BAZ", "foo", true},
- {"BAR, FOO, BAZ", "foo", true},
- {"foobar", "foo", false},
- {"barfoo ", "foo", false},
-}
-
-func TestHasToken(t *testing.T) {
- for _, tt := range hasTokenTests {
- if hasToken(tt.header, tt.token) != tt.want {
- t.Errorf("hasToken(%q, %q) = %v; want %v", tt.header, tt.token, !tt.want, tt.want)
- }
- }
-}
-
-func BenchmarkHeaderWriteSubset(b *testing.B) {
- doHeaderWriteSubset(b.N, b)
-}
-
-func TestHeaderWriteSubsetMallocs(t *testing.T) {
- doHeaderWriteSubset(100, t)
-}
-
-type errorfer interface {
- Errorf(string, ...interface{})
-}
-
-func doHeaderWriteSubset(n int, t errorfer) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
- h := Header(map[string][]string{
- "Content-Length": {"123"},
- "Content-Type": {"text/plain"},
- "Date": {"some date at some time Z"},
- "Server": {"Go http package"},
- })
- var buf bytes.Buffer
- var m0 runtime.MemStats
- runtime.ReadMemStats(&m0)
- for i := 0; i < n; i++ {
- buf.Reset()
- h.WriteSubset(&buf, nil)
- }
- var m1 runtime.MemStats
- runtime.ReadMemStats(&m1)
- if mallocs := m1.Mallocs - m0.Mallocs; n >= 100 && mallocs >= uint64(n) {
- // TODO(bradfitz,rsc): once we can sort without allocating,
- // make this an error. See http://golang.org/issue/3761
- // t.Errorf("did %d mallocs (>= %d iterations); should have avoided mallocs", mallocs, n)
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/httptest/recorder.go b/gcc-4.8.1/libgo/go/net/http/httptest/recorder.go
deleted file mode 100644
index 5451f5423..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httptest/recorder.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package httptest provides utilities for HTTP testing.
-package httptest
-
-import (
- "bytes"
- "net/http"
-)
-
-// ResponseRecorder is an implementation of http.ResponseWriter that
-// records its mutations for later inspection in tests.
-type ResponseRecorder struct {
- Code int // the HTTP response code from WriteHeader
- HeaderMap http.Header // the HTTP response headers
- Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
- Flushed bool
-
- wroteHeader bool
-}
-
-// NewRecorder returns an initialized ResponseRecorder.
-func NewRecorder() *ResponseRecorder {
- return &ResponseRecorder{
- HeaderMap: make(http.Header),
- Body: new(bytes.Buffer),
- Code: 200,
- }
-}
-
-// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
-// an explicit remote address isn't set on the ResponseRecorder.
-const DefaultRemoteAddr = "1.2.3.4"
-
-// Header returns the response headers.
-func (rw *ResponseRecorder) Header() http.Header {
- m := rw.HeaderMap
- if m == nil {
- m = make(http.Header)
- rw.HeaderMap = m
- }
- return m
-}
-
-// Write always succeeds and writes to rw.Body, if not nil.
-func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
- if !rw.wroteHeader {
- rw.WriteHeader(200)
- }
- if rw.Body != nil {
- rw.Body.Write(buf)
- }
- return len(buf), nil
-}
-
-// WriteHeader sets rw.Code.
-func (rw *ResponseRecorder) WriteHeader(code int) {
- if !rw.wroteHeader {
- rw.Code = code
- }
- rw.wroteHeader = true
-}
-
-// Flush sets rw.Flushed to true.
-func (rw *ResponseRecorder) Flush() {
- if !rw.wroteHeader {
- rw.WriteHeader(200)
- }
- rw.Flushed = true
-}
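
A minimal, hypothetical test showing the intended use of ResponseRecorder: drive a handler directly, then inspect Code, HeaderMap and Body. The handler and package names are illustrative:

    package hello_test

    import (
        "net/http"
        "net/http/httptest"
        "testing"
    )

    func TestHelloHandler(t *testing.T) {
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Header().Set("Content-Type", "text/plain")
            w.Write([]byte("hello"))
        })

        req, _ := http.NewRequest("GET", "http://example.com/hello", nil)
        rec := httptest.NewRecorder()
        h.ServeHTTP(rec, req)

        if rec.Code != 200 || rec.Body.String() != "hello" {
            t.Errorf("got %d %q, want 200 %q", rec.Code, rec.Body.String(), "hello")
        }
        if ct := rec.HeaderMap.Get("Content-Type"); ct != "text/plain" {
            t.Errorf("Content-Type = %q, want text/plain", ct)
        }
    }
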
diff --git a/gcc-4.8.1/libgo/go/net/http/httptest/recorder_test.go b/gcc-4.8.1/libgo/go/net/http/httptest/recorder_test.go
deleted file mode 100644
index 2b563260c..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httptest/recorder_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package httptest
-
-import (
- "fmt"
- "net/http"
- "testing"
-)
-
-func TestRecorder(t *testing.T) {
- type checkFunc func(*ResponseRecorder) error
- check := func(fns ...checkFunc) []checkFunc { return fns }
-
- hasStatus := func(wantCode int) checkFunc {
- return func(rec *ResponseRecorder) error {
- if rec.Code != wantCode {
- return fmt.Errorf("Status = %d; want %d", rec.Code, wantCode)
- }
- return nil
- }
- }
- hasContents := func(want string) checkFunc {
- return func(rec *ResponseRecorder) error {
- if rec.Body.String() != want {
- return fmt.Errorf("wrote = %q; want %q", rec.Body.String(), want)
- }
- return nil
- }
- }
- hasFlush := func(want bool) checkFunc {
- return func(rec *ResponseRecorder) error {
- if rec.Flushed != want {
- return fmt.Errorf("Flushed = %v; want %v", rec.Flushed, want)
- }
- return nil
- }
- }
-
- tests := []struct {
- name string
- h func(w http.ResponseWriter, r *http.Request)
- checks []checkFunc
- }{
- {
- "200 default",
- func(w http.ResponseWriter, r *http.Request) {},
- check(hasStatus(200), hasContents("")),
- },
- {
- "first code only",
- func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(201)
- w.WriteHeader(202)
- w.Write([]byte("hi"))
- },
- check(hasStatus(201), hasContents("hi")),
- },
- {
- "write sends 200",
- func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("hi first"))
- w.WriteHeader(201)
- w.WriteHeader(202)
- },
- check(hasStatus(200), hasContents("hi first"), hasFlush(false)),
- },
- {
- "flush",
- func(w http.ResponseWriter, r *http.Request) {
- w.(http.Flusher).Flush() // also sends a 200
- w.WriteHeader(201)
- },
- check(hasStatus(200), hasFlush(true)),
- },
- }
- r, _ := http.NewRequest("GET", "http://foo.com/", nil)
- for _, tt := range tests {
- h := http.HandlerFunc(tt.h)
- rec := NewRecorder()
- h.ServeHTTP(rec, r)
- for _, check := range tt.checks {
- if err := check(rec); err != nil {
- t.Errorf("%s: %v", tt.name, err)
- }
- }
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/httptest/server.go b/gcc-4.8.1/libgo/go/net/http/httptest/server.go
deleted file mode 100644
index fc52c9a2e..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httptest/server.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Implementation of Server
-
-package httptest
-
-import (
- "crypto/tls"
- "flag"
- "fmt"
- "net"
- "net/http"
- "os"
- "sync"
-)
-
-// A Server is an HTTP server listening on a system-chosen port on the
-// local loopback interface, for use in end-to-end HTTP tests.
-type Server struct {
- URL string // base URL of form http://ipaddr:port with no trailing slash
- Listener net.Listener
- TLS *tls.Config // nil if not using TLS
-
- // Config may be changed after calling NewUnstartedServer and
- // before Start or StartTLS.
- Config *http.Server
-
- // wg counts the number of outstanding HTTP requests on this server.
- // Close blocks until all requests are finished.
- wg sync.WaitGroup
-}
-
-// historyListener keeps track of all connections that it's ever
-// accepted.
-type historyListener struct {
- net.Listener
- sync.Mutex // protects history
- history []net.Conn
-}
-
-func (hs *historyListener) Accept() (c net.Conn, err error) {
- c, err = hs.Listener.Accept()
- if err == nil {
- hs.Lock()
- hs.history = append(hs.history, c)
- hs.Unlock()
- }
- return
-}
-
-func newLocalListener() net.Listener {
- if *serve != "" {
- l, err := net.Listen("tcp", *serve)
- if err != nil {
- panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err))
- }
- return l
- }
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
- panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
- }
- }
- return l
-}
-
-// When debugging a particular http server-based test,
-// this flag lets you run
-// go test -run=BrokenTest -httptest.serve=127.0.0.1:8000
-// to start the broken server so you can interact with it manually.
-var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks")
-
-// NewServer starts and returns a new Server.
-// The caller should call Close when finished, to shut it down.
-func NewServer(handler http.Handler) *Server {
- ts := NewUnstartedServer(handler)
- ts.Start()
- return ts
-}
-
-// NewUnstartedServer returns a new Server but doesn't start it.
-//
-// After changing its configuration, the caller should call Start or
-// StartTLS.
-//
-// The caller should call Close when finished, to shut it down.
-func NewUnstartedServer(handler http.Handler) *Server {
- return &Server{
- Listener: newLocalListener(),
- Config: &http.Server{Handler: handler},
- }
-}
-
-// Start starts a server from NewUnstartedServer.
-func (s *Server) Start() {
- if s.URL != "" {
- panic("Server already started")
- }
- s.Listener = &historyListener{Listener: s.Listener}
- s.URL = "http://" + s.Listener.Addr().String()
- s.wrapHandler()
- go s.Config.Serve(s.Listener)
- if *serve != "" {
- fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL)
- select {}
- }
-}
-
-// StartTLS starts TLS on a server from NewUnstartedServer.
-func (s *Server) StartTLS() {
- if s.URL != "" {
- panic("Server already started")
- }
- cert, err := tls.X509KeyPair(localhostCert, localhostKey)
- if err != nil {
- panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
- }
-
- s.TLS = &tls.Config{
- NextProtos: []string{"http/1.1"},
- Certificates: []tls.Certificate{cert},
- }
- tlsListener := tls.NewListener(s.Listener, s.TLS)
-
- s.Listener = &historyListener{Listener: tlsListener}
- s.URL = "https://" + s.Listener.Addr().String()
- s.wrapHandler()
- go s.Config.Serve(s.Listener)
-}
-
-func (s *Server) wrapHandler() {
- h := s.Config.Handler
- if h == nil {
- h = http.DefaultServeMux
- }
- s.Config.Handler = &waitGroupHandler{
- s: s,
- h: h,
- }
-}
-
-// NewTLSServer starts and returns a new Server using TLS.
-// The caller should call Close when finished, to shut it down.
-func NewTLSServer(handler http.Handler) *Server {
- ts := NewUnstartedServer(handler)
- ts.StartTLS()
- return ts
-}
-
-// Close shuts down the server and blocks until all outstanding
-// requests on this server have completed.
-func (s *Server) Close() {
- s.Listener.Close()
- s.wg.Wait()
- s.CloseClientConnections()
- if t, ok := http.DefaultTransport.(*http.Transport); ok {
- t.CloseIdleConnections()
- }
-}
-
-// CloseClientConnections closes any currently open HTTP connections
-// to the test Server.
-func (s *Server) CloseClientConnections() {
- hl, ok := s.Listener.(*historyListener)
- if !ok {
- return
- }
- hl.Lock()
- for _, conn := range hl.history {
- conn.Close()
- }
- hl.Unlock()
-}
-
-// waitGroupHandler wraps a handler, incrementing and decrementing a
-// sync.WaitGroup on each request, to enable Server.Close to block
-// until outstanding requests are finished.
-type waitGroupHandler struct {
- s *Server
- h http.Handler // non-nil
-}
-
-func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- h.s.wg.Add(1)
- defer h.s.wg.Done() // a defer, in case ServeHTTP below panics
- h.h.ServeHTTP(w, r)
-}
-
-// localhostCert is a PEM-encoded TLS cert with SAN DNS names
-// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end
-// of ASN.1 time).
-var localhostCert = []byte(`-----BEGIN CERTIFICATE-----
-MIIBTTCB+qADAgECAgEAMAsGCSqGSIb3DQEBBTAAMB4XDTcwMDEwMTAwMDAwMFoX
-DTQ5MTIzMTIzNTk1OVowADBaMAsGCSqGSIb3DQEBAQNLADBIAkEAsuA5mAFMj6Q7
-qoBzcvKzIq4kzuT5epSp2AkcQfyBHm7K13Ws7u+0b5Vb9gqTf5cAiIKcrtrXVqkL
-8i1UQF6AzwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCACQwEgYDVR0TAQH/BAgwBgEB
-/wIBATANBgNVHQ4EBgQEAQIDBDAPBgNVHSMECDAGgAQBAgMEMBsGA1UdEQQUMBKC
-CTEyNy4wLjAuMYIFWzo6MV0wCwYJKoZIhvcNAQEFA0EAj1Jsn/h2KHy7dgqutZNB
-nCGlNN+8vw263Bax9MklR85Ti6a0VWSvp/fDQZUADvmFTDkcXeA24pqmdUxeQDWw
-Pg==
------END CERTIFICATE-----`)
-
-// localhostKey is the private key for localhostCert.
-var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
-MIIBPQIBAAJBALLgOZgBTI+kO6qAc3LysyKuJM7k+XqUqdgJHEH8gR5uytd1rO7v
-tG+VW/YKk3+XAIiCnK7a11apC/ItVEBegM8CAwEAAQJBAI5sxq7naeR9ahyqRkJi
-SIv2iMxLuPEHaezf5CYOPWjSjBPyVhyRevkhtqEjF/WkgL7C2nWpYHsUcBDBQVF0
-3KECIQDtEGB2ulnkZAahl3WuJziXGLB+p8Wgx7wzSM6bHu1c6QIhAMEp++CaS+SJ
-/TrU0zwY/fW4SvQeb49BPZUF3oqR8Xz3AiEA1rAJHBzBgdOQKdE3ksMUPcnvNJSN
-poCcELmz2clVXtkCIQCLytuLV38XHToTipR4yMl6O+6arzAjZ56uq7m7ZRV0TwIh
-AM65XAOw8Dsg9Kq78aYXiOEDc5DL0sbFUu/SlmRcCg93
------END RSA PRIVATE KEY-----
-`)
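
The file above is the whole httptest server implementation, so a rough sketch of how its TLS side is typically exercised may help; the handler body, the printed string, and the InsecureSkipVerify client transport are illustrative assumptions, needed only because the bundled certificate is self-signed.

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Start a TLS server backed by the self-signed localhost certificate
	// bundled in the file above.
	ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("secure hello"))
	}))
	defer ts.Close()

	// The test certificate is not in the system trust store, so this
	// illustrative client skips verification (acceptable only in tests).
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	res, err := client.Get(ts.URL)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Printf("%s\n", body) // "secure hello"
}

Close blocks until outstanding requests finish, which is why the deferred ts.Close() is safe even while a client is still reading a body.
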
diff --git a/gcc-4.8.1/libgo/go/net/http/httptest/server_test.go b/gcc-4.8.1/libgo/go/net/http/httptest/server_test.go
deleted file mode 100644
index 500a9f0b8..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httptest/server_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package httptest
-
-import (
- "io/ioutil"
- "net/http"
- "testing"
-)
-
-func TestServer(t *testing.T) {
- ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("hello"))
- }))
- defer ts.Close()
- res, err := http.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- got, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if string(got) != "hello" {
- t.Errorf("got %q, want hello", string(got))
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/httputil/chunked.go b/gcc-4.8.1/libgo/go/net/http/httputil/chunked.go
deleted file mode 100644
index b66d40951..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httputil/chunked.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The wire protocol for HTTP's "chunked" Transfer-Encoding.
-
-// This code is a duplicate of ../chunked.go with these edits:
-// s/newChunked/NewChunked/g
-// s/package http/package httputil/
-// Please make any changes in both files.
-
-package httputil
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
-)
-
-const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
-
-var ErrLineTooLong = errors.New("header line too long")
-
-// NewChunkedReader returns a new chunkedReader that translates the data read from r
-// out of HTTP "chunked" format before returning it.
-// The chunkedReader returns io.EOF when the final 0-length chunk is read.
-//
-// NewChunkedReader is not needed by normal applications. The http package
-// automatically decodes chunking when reading response bodies.
-func NewChunkedReader(r io.Reader) io.Reader {
- br, ok := r.(*bufio.Reader)
- if !ok {
- br = bufio.NewReader(r)
- }
- return &chunkedReader{r: br}
-}
-
-type chunkedReader struct {
- r *bufio.Reader
- n uint64 // unread bytes in chunk
- err error
- buf [2]byte
-}
-
-func (cr *chunkedReader) beginChunk() {
- // chunk-size CRLF
- var line []byte
- line, cr.err = readLine(cr.r)
- if cr.err != nil {
- return
- }
- cr.n, cr.err = parseHexUint(line)
- if cr.err != nil {
- return
- }
- if cr.n == 0 {
- cr.err = io.EOF
- }
-}
-
-func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
- if cr.err != nil {
- return 0, cr.err
- }
- if cr.n == 0 {
- cr.beginChunk()
- if cr.err != nil {
- return 0, cr.err
- }
- }
- if uint64(len(b)) > cr.n {
- b = b[0:cr.n]
- }
- n, cr.err = cr.r.Read(b)
- cr.n -= uint64(n)
- if cr.n == 0 && cr.err == nil {
- // end of chunk (CRLF)
- if _, cr.err = io.ReadFull(cr.r, cr.buf[:]); cr.err == nil {
- if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
- cr.err = errors.New("malformed chunked encoding")
- }
- }
- }
- return n, cr.err
-}
-
-// Read a line of bytes (up to \n) from b.
-// Give up if the line exceeds maxLineLength.
-// The returned bytes are a pointer into storage in
-// the bufio, so they are only valid until the next bufio read.
-func readLine(b *bufio.Reader) (p []byte, err error) {
- if p, err = b.ReadSlice('\n'); err != nil {
- // We always know when EOF is coming.
- // If the caller asked for a line, there should be a line.
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- } else if err == bufio.ErrBufferFull {
- err = ErrLineTooLong
- }
- return nil, err
- }
- if len(p) >= maxLineLength {
- return nil, ErrLineTooLong
- }
- return trimTrailingWhitespace(p), nil
-}
-
-func trimTrailingWhitespace(b []byte) []byte {
- for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
- b = b[:len(b)-1]
- }
- return b
-}
-
-func isASCIISpace(b byte) bool {
- return b == ' ' || b == '\t' || b == '\n' || b == '\r'
-}
-
-// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned chunkedWriter
-// sends the final 0-length chunk that marks the end of the stream.
-//
-// NewChunkedWriter is not needed by normal applications. The http
-// package adds chunking automatically if handlers don't set a
-// Content-Length header. Using NewChunkedWriter inside a handler
-// would result in double chunking or chunking with a Content-Length
-// length, both of which are wrong.
-func NewChunkedWriter(w io.Writer) io.WriteCloser {
- return &chunkedWriter{w}
-}
-
-// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
-// Encoding wire format to the underlying Wire chunkedWriter.
-type chunkedWriter struct {
- Wire io.Writer
-}
-
-// Write the contents of data as one chunk to Wire.
-// NOTE: the corresponding chunk-writing procedure in Conn.Write has
-// a bug: it does not check whether io.WriteString succeeded.
-func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
-
- // Don't send 0-length data. It looks like EOF for chunked encoding.
- if len(data) == 0 {
- return 0, nil
- }
-
- if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
- return 0, err
- }
- if n, err = cw.Wire.Write(data); err != nil {
- return
- }
- if n != len(data) {
- err = io.ErrShortWrite
- return
- }
- _, err = io.WriteString(cw.Wire, "\r\n")
-
- return
-}
-
-func (cw *chunkedWriter) Close() error {
- _, err := io.WriteString(cw.Wire, "0\r\n")
- return err
-}
-
-func parseHexUint(v []byte) (n uint64, err error) {
- for _, b := range v {
- n <<= 4
- switch {
- case '0' <= b && b <= '9':
- b = b - '0'
- case 'a' <= b && b <= 'f':
- b = b - 'a' + 10
- case 'A' <= b && b <= 'F':
- b = b - 'A' + 10
- default:
- return 0, errors.New("invalid byte in chunk length")
- }
- n |= uint64(b)
- }
- return
-}
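
Since the file documents the chunked wire format only indirectly, a small round-trip sketch may be useful; it assumes nothing beyond the exported NewChunkedWriter and NewChunkedReader defined above.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http/httputil"
)

func main() {
	var wire bytes.Buffer

	// Each Write becomes one chunk; Close writes the final 0-length chunk.
	w := httputil.NewChunkedWriter(&wire)
	w.Write([]byte("hello, "))
	w.Write([]byte("chunked world"))
	w.Close()
	fmt.Printf("wire: %q\n", wire.String())
	// wire: "7\r\nhello, \r\nd\r\nchunked world\r\n0\r\n"

	// The reader strips the framing and reports io.EOF at the 0-length chunk.
	body, err := ioutil.ReadAll(httputil.NewChunkedReader(&wire))
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %q\n", body)
}

Each chunk is the payload length in hex, CRLF, the payload, CRLF; the terminating 0-length chunk is what the reader surfaces as io.EOF.
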
diff --git a/gcc-4.8.1/libgo/go/net/http/httputil/chunked_test.go b/gcc-4.8.1/libgo/go/net/http/httputil/chunked_test.go
deleted file mode 100644
index a06bffad5..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httputil/chunked_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code is a duplicate of ../chunked_test.go with these edits:
-// s/newChunked/NewChunked/g
-// s/package http/package httputil/
-// Please make any changes in both files.
-
-package httputil
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "runtime"
- "testing"
-)
-
-func TestChunk(t *testing.T) {
- var b bytes.Buffer
-
- w := NewChunkedWriter(&b)
- const chunk1 = "hello, "
- const chunk2 = "world! 0123456789abcdef"
- w.Write([]byte(chunk1))
- w.Write([]byte(chunk2))
- w.Close()
-
- if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
- t.Fatalf("chunk writer wrote %q; want %q", g, e)
- }
-
- r := NewChunkedReader(&b)
- data, err := ioutil.ReadAll(r)
- if err != nil {
- t.Logf(`data: "%s"`, data)
- t.Fatalf("ReadAll from reader: %v", err)
- }
- if g, e := string(data), chunk1+chunk2; g != e {
- t.Errorf("chunk reader read %q; want %q", g, e)
- }
-}
-
-func TestChunkReaderAllocs(t *testing.T) {
- // temporarily set GOMAXPROCS to 1 as we are testing memory allocations
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
- var buf bytes.Buffer
- w := NewChunkedWriter(&buf)
- a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
- w.Write(a)
- w.Write(b)
- w.Write(c)
- w.Close()
-
- r := NewChunkedReader(&buf)
- readBuf := make([]byte, len(a)+len(b)+len(c)+1)
-
- var ms runtime.MemStats
- runtime.ReadMemStats(&ms)
- m0 := ms.Mallocs
-
- n, err := io.ReadFull(r, readBuf)
-
- runtime.ReadMemStats(&ms)
- mallocs := ms.Mallocs - m0
- if mallocs > 1 {
- t.Errorf("%d mallocs; want <= 1", mallocs)
- }
-
- if n != len(readBuf)-1 {
- t.Errorf("read %d bytes; want %d", n, len(readBuf)-1)
- }
- if err != io.ErrUnexpectedEOF {
- t.Errorf("read error = %v; want ErrUnexpectedEOF", err)
- }
-}
-
-func TestParseHexUint(t *testing.T) {
- for i := uint64(0); i <= 1234; i++ {
- line := []byte(fmt.Sprintf("%x", i))
- got, err := parseHexUint(line)
- if err != nil {
- t.Fatalf("on %d: %v", i, err)
- }
- if got != i {
- t.Errorf("for input %q = %d; want %d", line, got, i)
- }
- }
- _, err := parseHexUint([]byte("bogus"))
- if err == nil {
- t.Error("expected error on bogus input")
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/httputil/dump.go b/gcc-4.8.1/libgo/go/net/http/httputil/dump.go
deleted file mode 100644
index 0b0035661..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httputil/dump.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package httputil
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "strings"
- "time"
-)
-
-// drainBody reads all of b into memory and then returns two equivalent
-// ReadClosers that yield the same bytes.
-//
-// One of the copies, say from b to r2, could be avoided by using a more
-// elaborate trick where the other copy is made during Request/Response.Write.
-// This would complicate things too much, given that these functions are for
-// debugging only.
-func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
- var buf bytes.Buffer
- if _, err = buf.ReadFrom(b); err != nil {
- return nil, nil, err
- }
- if err = b.Close(); err != nil {
- return nil, nil, err
- }
- return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
-}
-
-// dumpConn is a net.Conn which writes to Writer and reads from Reader
-type dumpConn struct {
- io.Writer
- io.Reader
-}
-
-func (c *dumpConn) Close() error { return nil }
-func (c *dumpConn) LocalAddr() net.Addr { return nil }
-func (c *dumpConn) RemoteAddr() net.Addr { return nil }
-func (c *dumpConn) SetDeadline(t time.Time) error { return nil }
-func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil }
-func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil }
-
-// DumpRequestOut is like DumpRequest but includes
-// headers that the standard http.Transport adds,
-// such as User-Agent.
-func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
- save := req.Body
- if !body || req.Body == nil {
- req.Body = nil
- } else {
- var err error
- save, req.Body, err = drainBody(req.Body)
- if err != nil {
- return nil, err
- }
- }
-
- // Since we're using the actual Transport code to write the request,
- // switch to http so the Transport doesn't try to do an SSL
- // negotiation with our dumpConn and its bytes.Buffer & pipe.
- // The wire format for https and http are the same, anyway.
- reqSend := req
- if req.URL.Scheme == "https" {
- reqSend = new(http.Request)
- *reqSend = *req
- reqSend.URL = new(url.URL)
- *reqSend.URL = *req.URL
- reqSend.URL.Scheme = "http"
- }
-
- // Use the actual Transport code to record what we would send
- // on the wire, but not using TCP. Use a Transport with a
- // custom dialer that returns a fake net.Conn that waits
- // for the full input (and recording it), and then responds
- // with a dummy response.
- var buf bytes.Buffer // records the output
- pr, pw := io.Pipe()
- dr := &delegateReader{c: make(chan io.Reader)}
- // Wait for the request before replying with a dummy response:
- go func() {
- http.ReadRequest(bufio.NewReader(pr))
- dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n")
- }()
-
- t := &http.Transport{
- Dial: func(net, addr string) (net.Conn, error) {
- return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
- },
- }
-
- _, err := t.RoundTrip(reqSend)
-
- req.Body = save
- if err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-// delegateReader is a reader that delegates to another reader,
-// once it arrives on a channel.
-type delegateReader struct {
- c chan io.Reader
- r io.Reader // nil until received from c
-}
-
-func (r *delegateReader) Read(p []byte) (int, error) {
- if r.r == nil {
- r.r = <-r.c
- }
- return r.r.Read(p)
-}
-
-// Return value if nonempty, def otherwise.
-func valueOrDefault(value, def string) string {
- if value != "" {
- return value
- }
- return def
-}
-
-var reqWriteExcludeHeaderDump = map[string]bool{
- "Host": true, // not in Header map anyway
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-// dumpAsReceived writes req to w in the form as it was received, or
-// at least as accurately as possible from the information retained in
-// the request.
-func dumpAsReceived(req *http.Request, w io.Writer) error {
- return nil
-}
-
-// DumpRequest returns the as-received wire representation of req,
-// optionally including the request body, for debugging.
-// DumpRequest is semantically a no-op, but in order to
-// dump the body, it reads the body data into memory and
-// changes req.Body to refer to the in-memory copy.
-// The documentation for http.Request.Write details which fields
-// of req are used.
-func DumpRequest(req *http.Request, body bool) (dump []byte, err error) {
- save := req.Body
- if !body || req.Body == nil {
- req.Body = nil
- } else {
- save, req.Body, err = drainBody(req.Body)
- if err != nil {
- return
- }
- }
-
- var b bytes.Buffer
-
- fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
- req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor)
-
- host := req.Host
- if host == "" && req.URL != nil {
- host = req.URL.Host
- }
- if host != "" {
- fmt.Fprintf(&b, "Host: %s\r\n", host)
- }
-
- chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
- if len(req.TransferEncoding) > 0 {
- fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
- }
- if req.Close {
- fmt.Fprintf(&b, "Connection: close\r\n")
- }
-
- err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
- if err != nil {
- return
- }
-
- io.WriteString(&b, "\r\n")
-
- if req.Body != nil {
- var dest io.Writer = &b
- if chunked {
- dest = NewChunkedWriter(dest)
- }
- _, err = io.Copy(dest, req.Body)
- if chunked {
- dest.(io.Closer).Close()
- io.WriteString(&b, "\r\n")
- }
- }
-
- req.Body = save
- if err != nil {
- return
- }
- dump = b.Bytes()
- return
-}
-
-// DumpResponse is like DumpRequest but dumps a response.
-func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
- var b bytes.Buffer
- save := resp.Body
- savecl := resp.ContentLength
- if !body || resp.Body == nil {
- resp.Body = nil
- resp.ContentLength = 0
- } else {
- save, resp.Body, err = drainBody(resp.Body)
- if err != nil {
- return
- }
- }
- err = resp.Write(&b)
- resp.Body = save
- resp.ContentLength = savecl
- if err != nil {
- return
- }
- dump = b.Bytes()
- return
-}
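
A hedged usage sketch for the dump helpers above; example.com and the choice to omit bodies are illustrative assumptions, and the program needs outbound network access to run.

package main

import (
	"fmt"
	"net/http"
	"net/http/httputil"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}

	// What the Transport would actually send, including the headers it
	// adds on its own (User-Agent, Accept-Encoding, Host).
	outgoing, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", outgoing)

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	// Status line and headers of the response; passing true would also
	// read the body into memory and restore it on res.Body.
	reply, err := httputil.DumpResponse(res, false)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", reply)
}
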
diff --git a/gcc-4.8.1/libgo/go/net/http/httputil/dump_test.go b/gcc-4.8.1/libgo/go/net/http/httputil/dump_test.go
deleted file mode 100644
index 5afe9ba74..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httputil/dump_test.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package httputil
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "testing"
-)
-
-type dumpTest struct {
- Req http.Request
- Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
-
- WantDump string
- WantDumpOut string
-}
-
-var dumpTests = []dumpTest{
-
- // HTTP/1.1 => chunked coding; body; empty trailer
- {
- Req: http.Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.google.com",
- Path: "/search",
- },
- ProtoMajor: 1,
- ProtoMinor: 1,
- TransferEncoding: []string{"chunked"},
- },
-
- Body: []byte("abcdef"),
-
- WantDump: "GET /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
- },
-
- // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
- // and doesn't add a User-Agent.
- {
- Req: http.Request{
- Method: "GET",
- URL: mustParseURL("/foo"),
- ProtoMajor: 1,
- ProtoMinor: 0,
- Header: http.Header{
- "X-Foo": []string{"X-Bar"},
- },
- },
-
- WantDump: "GET /foo HTTP/1.0\r\n" +
- "X-Foo: X-Bar\r\n\r\n",
- },
-
- {
- Req: *mustNewRequest("GET", "http://example.com/foo", nil),
-
- WantDumpOut: "GET /foo HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Accept-Encoding: gzip\r\n\r\n",
- },
-
- // Test that an https URL doesn't try to do an SSL negotiation
- // with a bytes.Buffer and hang with all goroutines not
- // runnable.
- {
- Req: *mustNewRequest("GET", "https://example.com/foo", nil),
-
- WantDumpOut: "GET /foo HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Accept-Encoding: gzip\r\n\r\n",
- },
-}
-
-func TestDumpRequest(t *testing.T) {
- for i, tt := range dumpTests {
- setBody := func() {
- if tt.Body == nil {
- return
- }
- switch b := tt.Body.(type) {
- case []byte:
- tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
- case func() io.ReadCloser:
- tt.Req.Body = b()
- }
- }
- setBody()
- if tt.Req.Header == nil {
- tt.Req.Header = make(http.Header)
- }
-
- if tt.WantDump != "" {
- setBody()
- dump, err := DumpRequest(&tt.Req, true)
- if err != nil {
- t.Errorf("DumpRequest #%d: %s", i, err)
- continue
- }
- if string(dump) != tt.WantDump {
- t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
- continue
- }
- }
-
- if tt.WantDumpOut != "" {
- setBody()
- dump, err := DumpRequestOut(&tt.Req, true)
- if err != nil {
- t.Errorf("DumpRequestOut #%d: %s", i, err)
- continue
- }
- if string(dump) != tt.WantDumpOut {
- t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump))
- continue
- }
- }
- }
-}
-
-func chunk(s string) string {
- return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
-}
-
-func mustParseURL(s string) *url.URL {
- u, err := url.Parse(s)
- if err != nil {
- panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
- }
- return u
-}
-
-func mustNewRequest(method, url string, body io.Reader) *http.Request {
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", method, url, body, err))
- }
- return req
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/httputil/persist.go b/gcc-4.8.1/libgo/go/net/http/httputil/persist.go
deleted file mode 100644
index 507938aca..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httputil/persist.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package httputil provides HTTP utility functions, complementing the
-// more common ones in the net/http package.
-package httputil
-
-import (
- "bufio"
- "errors"
- "io"
- "net"
- "net/http"
- "net/textproto"
- "sync"
-)
-
-var (
- ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"}
- ErrClosed = &http.ProtocolError{ErrorString: "connection closed by user"}
- ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"}
-)
-
-// This is an API usage error - the local side is closed.
-// ErrPersistEOF (above) reports that the remote side is closed.
-var errClosed = errors.New("i/o operation on closed connection")
-
-// A ServerConn reads requests and sends responses over an underlying
-// connection, until the HTTP keepalive logic commands an end. ServerConn
-// also allows hijacking the underlying connection by calling Hijack
-// to regain control over the connection. ServerConn supports pipelining,
-// i.e. requests can be read out of sync (but in the same order) while the
-// respective responses are sent.
-//
-// ServerConn is low-level and should not be needed by most applications.
-// See Server.
-type ServerConn struct {
- lk sync.Mutex // read-write protects the following fields
- c net.Conn
- r *bufio.Reader
- re, we error // read/write errors
- lastbody io.ReadCloser
- nread, nwritten int
- pipereq map[*http.Request]uint
-
- pipe textproto.Pipeline
-}
-
-// NewServerConn returns a new ServerConn reading and writing c. If r is not
-// nil, it is the buffer to use when reading c.
-func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn {
- if r == nil {
- r = bufio.NewReader(c)
- }
- return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)}
-}
-
-// Hijack detaches the ServerConn and returns the underlying connection as well
-// as the read-side bufio which may have some left over data. Hijack may be
-// called before Read has signaled the end of the keep-alive logic. The user
-// should not call Hijack while Read or Write is in progress.
-func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) {
- sc.lk.Lock()
- defer sc.lk.Unlock()
- c = sc.c
- r = sc.r
- sc.c = nil
- sc.r = nil
- return
-}
-
-// Close calls Hijack and then also closes the underlying connection
-func (sc *ServerConn) Close() error {
- c, _ := sc.Hijack()
- if c != nil {
- return c.Close()
- }
- return nil
-}
-
-// Read returns the next request on the wire. An ErrPersistEOF is returned if
-// it is gracefully determined that there are no more requests (e.g. after the
-// first request on an HTTP/1.0 connection, or after a Connection: close on an
-// HTTP/1.1 connection).
-func (sc *ServerConn) Read() (req *http.Request, err error) {
-
- // Ensure ordered execution of Reads and Writes
- id := sc.pipe.Next()
- sc.pipe.StartRequest(id)
- defer func() {
- sc.pipe.EndRequest(id)
- if req == nil {
- sc.pipe.StartResponse(id)
- sc.pipe.EndResponse(id)
- } else {
- // Remember the pipeline id of this request
- sc.lk.Lock()
- sc.pipereq[req] = id
- sc.lk.Unlock()
- }
- }()
-
- sc.lk.Lock()
- if sc.we != nil { // no point receiving if write-side broken or closed
- defer sc.lk.Unlock()
- return nil, sc.we
- }
- if sc.re != nil {
- defer sc.lk.Unlock()
- return nil, sc.re
- }
- if sc.r == nil { // connection closed by user in the meantime
- defer sc.lk.Unlock()
- return nil, errClosed
- }
- r := sc.r
- lastbody := sc.lastbody
- sc.lastbody = nil
- sc.lk.Unlock()
-
- // Make sure body is fully consumed, even if user does not call body.Close
- if lastbody != nil {
- // body.Close is assumed to be idempotent and multiple calls to
- // it should return the error that its first invocation
- // returned.
- err = lastbody.Close()
- if err != nil {
- sc.lk.Lock()
- defer sc.lk.Unlock()
- sc.re = err
- return nil, err
- }
- }
-
- req, err = http.ReadRequest(r)
- sc.lk.Lock()
- defer sc.lk.Unlock()
- if err != nil {
- if err == io.ErrUnexpectedEOF {
- // A close from the opposing client is treated as a
- // graceful close, even if there was some unparse-able
- // data before the close.
- sc.re = ErrPersistEOF
- return nil, sc.re
- } else {
- sc.re = err
- return req, err
- }
- }
- sc.lastbody = req.Body
- sc.nread++
- if req.Close {
- sc.re = ErrPersistEOF
- return req, sc.re
- }
- return req, err
-}
-
-// Pending returns the number of unanswered requests
-// that have been received on the connection.
-func (sc *ServerConn) Pending() int {
- sc.lk.Lock()
- defer sc.lk.Unlock()
- return sc.nread - sc.nwritten
-}
-
-// Write writes resp in response to req. To close the connection gracefully, set the
-// Response.Close field to true. Write should be considered operational until
-// it returns an error, regardless of any errors returned on the Read side.
-func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
-
- // Retrieve the pipeline ID of this request/response pair
- sc.lk.Lock()
- id, ok := sc.pipereq[req]
- delete(sc.pipereq, req)
- if !ok {
- sc.lk.Unlock()
- return ErrPipeline
- }
- sc.lk.Unlock()
-
- // Ensure pipeline order
- sc.pipe.StartResponse(id)
- defer sc.pipe.EndResponse(id)
-
- sc.lk.Lock()
- if sc.we != nil {
- defer sc.lk.Unlock()
- return sc.we
- }
- if sc.c == nil { // connection closed by user in the meantime
- defer sc.lk.Unlock()
- return ErrClosed
- }
- c := sc.c
- if sc.nread <= sc.nwritten {
- defer sc.lk.Unlock()
- return errors.New("persist server pipe count")
- }
- if resp.Close {
- // After signaling a keep-alive close, any pipelined unread
- // requests will be lost. It is up to the user to drain them
- // before signaling.
- sc.re = ErrPersistEOF
- }
- sc.lk.Unlock()
-
- err := resp.Write(c)
- sc.lk.Lock()
- defer sc.lk.Unlock()
- if err != nil {
- sc.we = err
- return err
- }
- sc.nwritten++
-
- return nil
-}
-
-// A ClientConn sends requests and receives headers over an underlying
-// connection, while respecting the HTTP keepalive logic. ClientConn
-// supports hijacking the connection by calling Hijack to
-// regain control of the underlying net.Conn and deal with it as desired.
-//
-// ClientConn is low-level and should not be needed by most applications.
-// See Client.
-type ClientConn struct {
- lk sync.Mutex // read-write protects the following fields
- c net.Conn
- r *bufio.Reader
- re, we error // read/write errors
- lastbody io.ReadCloser
- nread, nwritten int
- pipereq map[*http.Request]uint
-
- pipe textproto.Pipeline
- writeReq func(*http.Request, io.Writer) error
-}
-
-// NewClientConn returns a new ClientConn reading and writing c. If r is not
-// nil, it is the buffer to use when reading c.
-func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
- if r == nil {
- r = bufio.NewReader(c)
- }
- return &ClientConn{
- c: c,
- r: r,
- pipereq: make(map[*http.Request]uint),
- writeReq: (*http.Request).Write,
- }
-}
-
-// NewProxyClientConn works like NewClientConn but writes Requests
-// using Request's WriteProxy method.
-func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
- cc := NewClientConn(c, r)
- cc.writeReq = (*http.Request).WriteProxy
- return cc
-}
-
-// Hijack detaches the ClientConn and returns the underlying connection as well
-// as the read-side bufio which may have some left over data. Hijack may be
-// called before the user or Read have signaled the end of the keep-alive
-// logic. The user should not call Hijack while Read or Write is in progress.
-func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) {
- cc.lk.Lock()
- defer cc.lk.Unlock()
- c = cc.c
- r = cc.r
- cc.c = nil
- cc.r = nil
- return
-}
-
-// Close calls Hijack and then also closes the underlying connection
-func (cc *ClientConn) Close() error {
- c, _ := cc.Hijack()
- if c != nil {
- return c.Close()
- }
- return nil
-}
-
-// Write writes a request. An ErrPersistEOF error is returned if the connection
-// has been closed in an HTTP keepalive sense. If req.Close equals true, the
-// keepalive connection is logically closed after this request and the opposing
-// server is informed. An ErrUnexpectedEOF indicates the remote closed the
-// underlying TCP connection, which is usually considered a graceful close.
-func (cc *ClientConn) Write(req *http.Request) (err error) {
-
- // Ensure ordered execution of Writes
- id := cc.pipe.Next()
- cc.pipe.StartRequest(id)
- defer func() {
- cc.pipe.EndRequest(id)
- if err != nil {
- cc.pipe.StartResponse(id)
- cc.pipe.EndResponse(id)
- } else {
- // Remember the pipeline id of this request
- cc.lk.Lock()
- cc.pipereq[req] = id
- cc.lk.Unlock()
- }
- }()
-
- cc.lk.Lock()
- if cc.re != nil { // no point sending if read-side closed or broken
- defer cc.lk.Unlock()
- return cc.re
- }
- if cc.we != nil {
- defer cc.lk.Unlock()
- return cc.we
- }
- if cc.c == nil { // connection closed by user in the meantime
- defer cc.lk.Unlock()
- return errClosed
- }
- c := cc.c
- if req.Close {
- // We write the EOF to the write-side error, because there
- // still might be some pipelined reads
- cc.we = ErrPersistEOF
- }
- cc.lk.Unlock()
-
- err = cc.writeReq(req, c)
- cc.lk.Lock()
- defer cc.lk.Unlock()
- if err != nil {
- cc.we = err
- return err
- }
- cc.nwritten++
-
- return nil
-}
-
-// Pending returns the number of unanswered requests
-// that have been sent on the connection.
-func (cc *ClientConn) Pending() int {
- cc.lk.Lock()
- defer cc.lk.Unlock()
- return cc.nwritten - cc.nread
-}
-
-// Read reads the next response from the wire. A valid response might be
-// returned together with an ErrPersistEOF, which means that the remote
-// requested that this be the last request serviced. Read can be called
-// concurrently with Write, but not with another Read.
-func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) {
- // Retrieve the pipeline ID of this request/response pair
- cc.lk.Lock()
- id, ok := cc.pipereq[req]
- delete(cc.pipereq, req)
- if !ok {
- cc.lk.Unlock()
- return nil, ErrPipeline
- }
- cc.lk.Unlock()
-
- // Ensure pipeline order
- cc.pipe.StartResponse(id)
- defer cc.pipe.EndResponse(id)
-
- cc.lk.Lock()
- if cc.re != nil {
- defer cc.lk.Unlock()
- return nil, cc.re
- }
- if cc.r == nil { // connection closed by user in the meantime
- defer cc.lk.Unlock()
- return nil, errClosed
- }
- r := cc.r
- lastbody := cc.lastbody
- cc.lastbody = nil
- cc.lk.Unlock()
-
- // Make sure body is fully consumed, even if user does not call body.Close
- if lastbody != nil {
- // body.Close is assumed to be idempotent and multiple calls to
- // it should return the error that its first invocation
- // returned.
- err = lastbody.Close()
- if err != nil {
- cc.lk.Lock()
- defer cc.lk.Unlock()
- cc.re = err
- return nil, err
- }
- }
-
- resp, err = http.ReadResponse(r, req)
- cc.lk.Lock()
- defer cc.lk.Unlock()
- if err != nil {
- cc.re = err
- return resp, err
- }
- cc.lastbody = resp.Body
-
- cc.nread++
-
- if resp.Close {
- cc.re = ErrPersistEOF // don't send any more requests
- return resp, cc.re
- }
- return resp, err
-}
-
-// Do is a convenience method that writes a request and reads a response.
-func (cc *ClientConn) Do(req *http.Request) (resp *http.Response, err error) {
- err = cc.Write(req)
- if err != nil {
- return
- }
- return cc.Read(req)
-}
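
The ServerConn/ClientConn API above is low-level, so a sketch of the client side may clarify the intended flow; the host example.com and the two request paths are assumptions for illustration only.

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httputil"
)

func main() {
	// Dial the TCP connection ourselves and reuse it for several
	// requests through the low-level, keep-alive-aware ClientConn.
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	cc := httputil.NewClientConn(conn, nil)
	defer cc.Close()

	for _, path := range []string{"/", "/index.html"} {
		req, err := http.NewRequest("GET", "http://example.com"+path, nil)
		if err != nil {
			panic(err)
		}
		resp, err := cc.Do(req) // Write, then Read, in pipeline order
		if err != nil && err != httputil.ErrPersistEOF {
			panic(err)
		}
		if resp != nil {
			fmt.Println(path, resp.Status)
			resp.Body.Close()
		}
		if err == httputil.ErrPersistEOF {
			break // the server asked for this to be the last request
		}
	}
}
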
diff --git a/gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy.go b/gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy.go
deleted file mode 100644
index 134c45299..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP reverse proxy handler
-
-package httputil
-
-import (
- "io"
- "log"
- "net"
- "net/http"
- "net/url"
- "strings"
- "sync"
- "time"
-)
-
-// onExitFlushLoop is a callback set by tests to detect the state of the
-// flushLoop() goroutine.
-var onExitFlushLoop func()
-
-// ReverseProxy is an HTTP Handler that takes an incoming request and
-// sends it to another server, proxying the response back to the
-// client.
-type ReverseProxy struct {
- // Director must be a function which modifies
- // the request into a new request to be sent
- // using Transport. Its response is then copied
- // back to the original client unmodified.
- Director func(*http.Request)
-
- // The transport used to perform proxy requests.
- // If nil, http.DefaultTransport is used.
- Transport http.RoundTripper
-
- // FlushInterval specifies the flush interval
- // to flush to the client while copying the
- // response body.
- // If zero, no periodic flushing is done.
- FlushInterval time.Duration
-}
-
-func singleJoiningSlash(a, b string) string {
- aslash := strings.HasSuffix(a, "/")
- bslash := strings.HasPrefix(b, "/")
- switch {
- case aslash && bslash:
- return a + b[1:]
- case !aslash && !bslash:
- return a + "/" + b
- }
- return a + b
-}
-
-// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites
-// URLs to the scheme, host, and base path provided in target. If the
-// target's path is "/base" and the incoming request was for "/dir",
-// the target request will be for /base/dir.
-func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
- targetQuery := target.RawQuery
- director := func(req *http.Request) {
- req.URL.Scheme = target.Scheme
- req.URL.Host = target.Host
- req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
- if targetQuery == "" || req.URL.RawQuery == "" {
- req.URL.RawQuery = targetQuery + req.URL.RawQuery
- } else {
- req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
- }
- }
- return &ReverseProxy{Director: director}
-}
-
-func copyHeader(dst, src http.Header) {
- for k, vv := range src {
- for _, v := range vv {
- dst.Add(k, v)
- }
- }
-}
-
-func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- transport := p.Transport
- if transport == nil {
- transport = http.DefaultTransport
- }
-
- outreq := new(http.Request)
- *outreq = *req // includes shallow copies of maps, but okay
-
- p.Director(outreq)
- outreq.Proto = "HTTP/1.1"
- outreq.ProtoMajor = 1
- outreq.ProtoMinor = 1
- outreq.Close = false
-
- // Remove the connection header to the backend. We want a
- // persistent connection, regardless of what the client sent
- // to us. This is modifying the same underlying map from req
- // (shallow copied above) so we only copy it if necessary.
- if outreq.Header.Get("Connection") != "" {
- outreq.Header = make(http.Header)
- copyHeader(outreq.Header, req.Header)
- outreq.Header.Del("Connection")
- }
-
- if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
- // If we aren't the first proxy retain prior
- // X-Forwarded-For information as a comma+space
- // separated list and fold multiple headers into one.
- if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
- clientIP = strings.Join(prior, ", ") + ", " + clientIP
- }
- outreq.Header.Set("X-Forwarded-For", clientIP)
- }
-
- res, err := transport.RoundTrip(outreq)
- if err != nil {
- log.Printf("http: proxy error: %v", err)
- rw.WriteHeader(http.StatusInternalServerError)
- return
- }
- defer res.Body.Close()
-
- copyHeader(rw.Header(), res.Header)
-
- rw.WriteHeader(res.StatusCode)
- p.copyResponse(rw, res.Body)
-}
-
-func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
- if p.FlushInterval != 0 {
- if wf, ok := dst.(writeFlusher); ok {
- mlw := &maxLatencyWriter{
- dst: wf,
- latency: p.FlushInterval,
- done: make(chan bool),
- }
- go mlw.flushLoop()
- defer mlw.stop()
- dst = mlw
- }
- }
-
- io.Copy(dst, src)
-}
-
-type writeFlusher interface {
- io.Writer
- http.Flusher
-}
-
-type maxLatencyWriter struct {
- dst writeFlusher
- latency time.Duration
-
- lk sync.Mutex // protects Write + Flush
- done chan bool
-}
-
-func (m *maxLatencyWriter) Write(p []byte) (int, error) {
- m.lk.Lock()
- defer m.lk.Unlock()
- return m.dst.Write(p)
-}
-
-func (m *maxLatencyWriter) flushLoop() {
- t := time.NewTicker(m.latency)
- defer t.Stop()
- for {
- select {
- case <-m.done:
- if onExitFlushLoop != nil {
- onExitFlushLoop()
- }
- return
- case <-t.C:
- m.lk.Lock()
- m.dst.Flush()
- m.lk.Unlock()
- }
- }
- panic("unreached")
-}
-
-func (m *maxLatencyWriter) stop() { m.done <- true }
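
A minimal sketch of wiring up the reverse proxy described above; the listen address :8080, the backend URL, and the flush interval are illustrative assumptions.

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
	"time"
)

func main() {
	// Everything received on :8080 is rewritten to the backend's
	// scheme, host, and base path and proxied there.
	target, err := url.Parse("http://127.0.0.1:9000/base")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	proxy.FlushInterval = 100 * time.Millisecond // periodically flush streamed responses

	log.Fatal(http.ListenAndServe(":8080", proxy))
}

Because the director only rewrites the URL, a request for /dir reaches the backend as /base/dir, as the doc comment above describes.
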
diff --git a/gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy_test.go b/gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy_test.go
deleted file mode 100644
index 863927162..000000000
--- a/gcc-4.8.1/libgo/go/net/http/httputil/reverseproxy_test.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Reverse proxy tests.
-
-package httputil
-
-import (
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "strings"
- "testing"
- "time"
-)
-
-func TestReverseProxy(t *testing.T) {
- const backendResponse = "I am the backend"
- const backendStatus = 404
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if len(r.TransferEncoding) > 0 {
- t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding)
- }
- if r.Header.Get("X-Forwarded-For") == "" {
- t.Errorf("didn't get X-Forwarded-For header")
- }
- if c := r.Header.Get("Connection"); c != "" {
- t.Errorf("handler got Connection header value %q", c)
- }
- if g, e := r.Host, "some-name"; g != e {
- t.Errorf("backend got Host header %q, want %q", g, e)
- }
- w.Header().Set("X-Foo", "bar")
- http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"})
- w.WriteHeader(backendStatus)
- w.Write([]byte(backendResponse))
- }))
- defer backend.Close()
- backendURL, err := url.Parse(backend.URL)
- if err != nil {
- t.Fatal(err)
- }
- proxyHandler := NewSingleHostReverseProxy(backendURL)
- frontend := httptest.NewServer(proxyHandler)
- defer frontend.Close()
-
- getReq, _ := http.NewRequest("GET", frontend.URL, nil)
- getReq.Host = "some-name"
- getReq.Header.Set("Connection", "close")
- getReq.Close = true
- res, err := http.DefaultClient.Do(getReq)
- if err != nil {
- t.Fatalf("Get: %v", err)
- }
- if g, e := res.StatusCode, backendStatus; g != e {
- t.Errorf("got res.StatusCode %d; expected %d", g, e)
- }
- if g, e := res.Header.Get("X-Foo"), "bar"; g != e {
- t.Errorf("got X-Foo %q; expected %q", g, e)
- }
- if g, e := len(res.Header["Set-Cookie"]), 1; g != e {
- t.Fatalf("got %d SetCookies, want %d", g, e)
- }
- if cookie := res.Cookies()[0]; cookie.Name != "flavor" {
- t.Errorf("unexpected cookie %q", cookie.Name)
- }
- bodyBytes, _ := ioutil.ReadAll(res.Body)
- if g, e := string(bodyBytes), backendResponse; g != e {
- t.Errorf("got body %q; expected %q", g, e)
- }
-}
-
-func TestXForwardedFor(t *testing.T) {
- const prevForwardedFor = "client ip"
- const backendResponse = "I am the backend"
- const backendStatus = 404
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.Header.Get("X-Forwarded-For") == "" {
- t.Errorf("didn't get X-Forwarded-For header")
- }
- if !strings.Contains(r.Header.Get("X-Forwarded-For"), prevForwardedFor) {
- t.Errorf("X-Forwarded-For didn't contain prior data")
- }
- w.WriteHeader(backendStatus)
- w.Write([]byte(backendResponse))
- }))
- defer backend.Close()
- backendURL, err := url.Parse(backend.URL)
- if err != nil {
- t.Fatal(err)
- }
- proxyHandler := NewSingleHostReverseProxy(backendURL)
- frontend := httptest.NewServer(proxyHandler)
- defer frontend.Close()
-
- getReq, _ := http.NewRequest("GET", frontend.URL, nil)
- getReq.Host = "some-name"
- getReq.Header.Set("Connection", "close")
- getReq.Header.Set("X-Forwarded-For", prevForwardedFor)
- getReq.Close = true
- res, err := http.DefaultClient.Do(getReq)
- if err != nil {
- t.Fatalf("Get: %v", err)
- }
- if g, e := res.StatusCode, backendStatus; g != e {
- t.Errorf("got res.StatusCode %d; expected %d", g, e)
- }
- bodyBytes, _ := ioutil.ReadAll(res.Body)
- if g, e := string(bodyBytes), backendResponse; g != e {
- t.Errorf("got body %q; expected %q", g, e)
- }
-}
-
-var proxyQueryTests = []struct {
- baseSuffix string // suffix to add to backend URL
- reqSuffix string // suffix to add to frontend's request URL
- want string // what backend should see for final request URL (without ?)
-}{
- {"", "", ""},
- {"?sta=tic", "?us=er", "sta=tic&us=er"},
- {"", "?us=er", "us=er"},
- {"?sta=tic", "", "sta=tic"},
-}
-
-func TestReverseProxyQuery(t *testing.T) {
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("X-Got-Query", r.URL.RawQuery)
- w.Write([]byte("hi"))
- }))
- defer backend.Close()
-
- for i, tt := range proxyQueryTests {
- backendURL, err := url.Parse(backend.URL + tt.baseSuffix)
- if err != nil {
- t.Fatal(err)
- }
- frontend := httptest.NewServer(NewSingleHostReverseProxy(backendURL))
- req, _ := http.NewRequest("GET", frontend.URL+tt.reqSuffix, nil)
- req.Close = true
- res, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("%d. Get: %v", i, err)
- }
- if g, e := res.Header.Get("X-Got-Query"), tt.want; g != e {
- t.Errorf("%d. got query %q; expected %q", i, g, e)
- }
- res.Body.Close()
- frontend.Close()
- }
-}
-
-func TestReverseProxyFlushInterval(t *testing.T) {
- const expected = "hi"
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(expected))
- }))
- defer backend.Close()
-
- backendURL, err := url.Parse(backend.URL)
- if err != nil {
- t.Fatal(err)
- }
-
- proxyHandler := NewSingleHostReverseProxy(backendURL)
- proxyHandler.FlushInterval = time.Microsecond
-
- done := make(chan bool)
- onExitFlushLoop = func() { done <- true }
- defer func() { onExitFlushLoop = nil }()
-
- frontend := httptest.NewServer(proxyHandler)
- defer frontend.Close()
-
- req, _ := http.NewRequest("GET", frontend.URL, nil)
- req.Close = true
- res, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
- if bodyBytes, _ := ioutil.ReadAll(res.Body); string(bodyBytes) != expected {
- t.Errorf("got body %q; expected %q", bodyBytes, expected)
- }
-
- select {
- case <-done:
- // OK
- case <-time.After(5 * time.Second):
- t.Error("maxLatencyWriter flushLoop() never exited")
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/jar.go b/gcc-4.8.1/libgo/go/net/http/jar.go
deleted file mode 100644
index 35eee682f..000000000
--- a/gcc-4.8.1/libgo/go/net/http/jar.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "net/url"
-)
-
-// A CookieJar manages storage and use of cookies in HTTP requests.
-//
-// Implementations of CookieJar must be safe for concurrent use by multiple
-// goroutines.
-type CookieJar interface {
- // SetCookies handles the receipt of the cookies in a reply for the
- // given URL. It may or may not choose to save the cookies, depending
- // on the jar's policy and implementation.
- SetCookies(u *url.URL, cookies []*Cookie)
-
- // Cookies returns the cookies to send in a request for the given URL.
- // It is up to the implementation to honor the standard cookie use
- // restrictions such as in RFC 6265.
- Cookies(u *url.URL) []*Cookie
-}
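
Since jar.go only defines the interface, a deliberately naive in-memory implementation may help show the shape of a CookieJar; it keys cookies by host alone and ignores the Path, Domain, Secure, and expiry rules that RFC 6265 requires of a real jar.

package main

import (
	"net/http"
	"net/url"
	"sync"
)

// naiveJar is a simplified CookieJar for illustration only: it stores
// every received cookie under the request host and returns all of them
// for later requests to that host.
type naiveJar struct {
	mu      sync.Mutex
	cookies map[string][]*http.Cookie
}

func newNaiveJar() *naiveJar {
	return &naiveJar{cookies: make(map[string][]*http.Cookie)}
}

func (j *naiveJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
	j.mu.Lock()
	defer j.mu.Unlock()
	j.cookies[u.Host] = append(j.cookies[u.Host], cookies...)
}

func (j *naiveJar) Cookies(u *url.URL) []*http.Cookie {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.cookies[u.Host]
}

func main() {
	// A client with the jar installed resends cookies it received from
	// earlier responses for the same host.
	client := &http.Client{Jar: newNaiveJar()}
	_ = client
}
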
diff --git a/gcc-4.8.1/libgo/go/net/http/lex.go b/gcc-4.8.1/libgo/go/net/http/lex.go
deleted file mode 100644
index cb33318f4..000000000
--- a/gcc-4.8.1/libgo/go/net/http/lex.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-// This file deals with lexical matters of HTTP
-
-var isTokenTable = [127]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'V': true,
- 'W': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
-
-func isToken(r rune) bool {
- i := int(r)
- return i < len(isTokenTable) && isTokenTable[i]
-}
-
-func isNotToken(r rune) bool {
- return !isToken(r)
-}
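
A hypothetical helper, not part of this file, showing how the token table above would typically be consulted; the name isValidHeaderName is an assumption, and the function would have to live in package http to reach the unexported isToken.

// isValidHeaderName reports whether s consists entirely of RFC 2616
// token characters, as defined by isTokenTable above.
func isValidHeaderName(s string) bool {
	if s == "" {
		return false
	}
	for _, r := range s {
		if !isToken(r) {
			return false
		}
	}
	return true
}
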
diff --git a/gcc-4.8.1/libgo/go/net/http/lex_test.go b/gcc-4.8.1/libgo/go/net/http/lex_test.go
deleted file mode 100644
index 6d9d294f7..000000000
--- a/gcc-4.8.1/libgo/go/net/http/lex_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "testing"
-)
-
-func isChar(c rune) bool { return c <= 127 }
-
-func isCtl(c rune) bool { return c <= 31 || c == 127 }
-
-func isSeparator(c rune) bool {
- switch c {
- case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
- return true
- }
- return false
-}
-
-func TestIsToken(t *testing.T) {
- for i := 0; i <= 130; i++ {
- r := rune(i)
- expected := isChar(r) && !isCtl(r) && !isSeparator(r)
- if isToken(r) != expected {
- t.Errorf("isToken(0x%x) = %v", r, !expected)
- }
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/pprof/pprof.go b/gcc-4.8.1/libgo/go/net/http/pprof/pprof.go
deleted file mode 100644
index 0c03e5b2b..000000000
--- a/gcc-4.8.1/libgo/go/net/http/pprof/pprof.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pprof serves via its HTTP server runtime profiling data
-// in the format expected by the pprof visualization tool.
-// For more information about pprof, see
-// http://code.google.com/p/google-perftools/.
-//
-// The package is typically only imported for the side effect of
-// registering its HTTP handlers.
-// The handled paths all begin with /debug/pprof/.
-//
-// To use pprof, link this package into your program:
-// import _ "net/http/pprof"
-//
-// If your application is not already running an http server, you
-// need to start one. Add "net/http" and "log" to your imports and
-// the following code to your main function:
-//
-// go func() {
-// log.Println(http.ListenAndServe("localhost:6060", nil))
-// }()
-//
-// Then use the pprof tool to look at the heap profile:
-//
-// go tool pprof http://localhost:6060/debug/pprof/heap
-//
-// Or to look at a 30-second CPU profile:
-//
-// go tool pprof http://localhost:6060/debug/pprof/profile
-//
-// Or to look at the goroutine blocking profile:
-//
-// go tool pprof http://localhost:6060/debug/pprof/block
-//
-// To view all available profiles, open http://localhost:6060/debug/pprof/
-// in your browser.
-//
-// For a study of the facility in action, visit
-//
-// http://blog.golang.org/2011/06/profiling-go-programs.html
-//
-package pprof
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "html/template"
- "io"
- "log"
- "net/http"
- "os"
- "runtime"
- "runtime/pprof"
- "strconv"
- "strings"
- "time"
-)
-
-func init() {
- http.Handle("/debug/pprof/", http.HandlerFunc(Index))
- http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
- http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
- http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
-}
-
-// Cmdline responds with the running program's
-// command line, with arguments separated by NUL bytes.
-// The package initialization registers it as /debug/pprof/cmdline.
-func Cmdline(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- fmt.Fprint(w, strings.Join(os.Args, "\x00"))
-}
-
-// Profile responds with the pprof-formatted cpu profile.
-// The package initialization registers it as /debug/pprof/profile.
-func Profile(w http.ResponseWriter, r *http.Request) {
- sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64)
- if sec == 0 {
- sec = 30
- }
-
- // Set Content Type assuming StartCPUProfile will work,
- // because if it does it starts writing.
- w.Header().Set("Content-Type", "application/octet-stream")
- if err := pprof.StartCPUProfile(w); err != nil {
- // StartCPUProfile failed, so no writes yet.
- // Can change header back to text content
- // and send error code.
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.WriteHeader(http.StatusInternalServerError)
- fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
- return
- }
- time.Sleep(time.Duration(sec) * time.Second)
- pprof.StopCPUProfile()
-}
-
-// Symbol looks up the program counters listed in the request,
-// responding with a table mapping program counters to function names.
-// The package initialization registers it as /debug/pprof/symbol.
-func Symbol(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-
- // We have to read the whole POST body before
- // writing any output. Buffer the output here.
- var buf bytes.Buffer
-
- // We don't know how many symbols we have, but we
- // do have symbol information. Pprof only cares whether
- // this number is 0 (no symbols available) or > 0.
- fmt.Fprintf(&buf, "num_symbols: 1\n")
-
- var b *bufio.Reader
- if r.Method == "POST" {
- b = bufio.NewReader(r.Body)
- } else {
- b = bufio.NewReader(strings.NewReader(r.URL.RawQuery))
- }
-
- for {
- word, err := b.ReadSlice('+')
- if err == nil {
- word = word[0 : len(word)-1] // trim +
- }
- pc, _ := strconv.ParseUint(string(word), 0, 64)
- if pc != 0 {
- f := runtime.FuncForPC(uintptr(pc))
- if f != nil {
- fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
- }
- }
-
- // Wait until here to check for err; the last
- // symbol will have an err because it doesn't end in +.
- if err != nil {
- if err != io.EOF {
- fmt.Fprintf(&buf, "reading request: %v\n", err)
- }
- break
- }
- }
-
- w.Write(buf.Bytes())
-}
-
-// Handler returns an HTTP handler that serves the named profile.
-func Handler(name string) http.Handler {
- return handler(name)
-}
-
-type handler string
-
-func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- debug, _ := strconv.Atoi(r.FormValue("debug"))
- p := pprof.Lookup(string(name))
- if p == nil {
- w.WriteHeader(404)
- fmt.Fprintf(w, "Unknown profile: %s\n", name)
- return
- }
- p.WriteTo(w, debug)
- return
-}
-
-// Index responds with the pprof-formatted profile named by the request.
-// For example, "/debug/pprof/heap" serves the "heap" profile.
-// Index responds to a request for "/debug/pprof/" with an HTML page
-// listing the available profiles.
-func Index(w http.ResponseWriter, r *http.Request) {
- if strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
- name := r.URL.Path[len("/debug/pprof/"):]
- if name != "" {
- handler(name).ServeHTTP(w, r)
- return
- }
- }
-
- profiles := pprof.Profiles()
- if err := indexTmpl.Execute(w, profiles); err != nil {
- log.Print(err)
- }
-}
-
-var indexTmpl = template.Must(template.New("index").Parse(`<html>
-<head>
-<title>/debug/pprof/</title>
-</head>
-<body>
-/debug/pprof/<br>
-<br>
-profiles:<br>
-<table>
-{{range .}}
-<tr><td align=right>{{.Count}}<td><a href="/debug/pprof/{{.Name}}?debug=1">{{.Name}}</a>
-{{end}}
-</table>
-<br>
-<a href="/debug/pprof/goroutine?debug=2">full goroutine stack dump</a><br>
-</body>
-</html>
-`))
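
A sketch of registering these handlers on an explicit ServeMux instead of relying on the init side effects above; the listen address and the choice of the heap profile are illustrative assumptions.

package main

import (
	"log"
	"net/http"
	"net/http/pprof"
)

func main() {
	// Register the pprof handlers on a dedicated mux rather than via the
	// package's side-effect registration against http.DefaultServeMux.
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.Handle("/debug/pprof/heap", pprof.Handler("heap"))

	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}
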
diff --git a/gcc-4.8.1/libgo/go/net/http/proxy_test.go b/gcc-4.8.1/libgo/go/net/http/proxy_test.go
deleted file mode 100644
index 449ccaeea..000000000
--- a/gcc-4.8.1/libgo/go/net/http/proxy_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "net/url"
- "os"
- "testing"
-)
-
-// TODO(mattn):
-// test ProxyAuth
-
-var UseProxyTests = []struct {
- host string
- match bool
-}{
- // Never proxy localhost:
- {"localhost:80", false},
- {"127.0.0.1", false},
- {"127.0.0.2", false},
- {"[::1]", false},
- {"[::2]", true}, // not a loopback address
-
- {"barbaz.net", false}, // match as .barbaz.net
- {"foobar.com", false}, // have a port but match
- {"foofoobar.com", true}, // not match as a part of foobar.com
- {"baz.com", true}, // not match as a part of barbaz.com
- {"localhost.net", true}, // not match as suffix of address
- {"local.localhost", true}, // not match as prefix as address
- {"barbarbaz.net", true}, // not match because NO_PROXY have a '.'
- {"www.foobar.com", false}, // match because NO_PROXY includes "foobar.com"
-}
-
-func TestUseProxy(t *testing.T) {
- oldenv := os.Getenv("NO_PROXY")
- defer os.Setenv("NO_PROXY", oldenv)
-
- no_proxy := "foobar.com, .barbaz.net"
- os.Setenv("NO_PROXY", no_proxy)
-
- for _, test := range UseProxyTests {
- if useProxy(test.host+":80") != test.match {
- t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match)
- }
- }
-}
-
-var cacheKeysTests = []struct {
- proxy string
- scheme string
- addr string
- key string
-}{
- {"", "http", "foo.com", "|http|foo.com"},
- {"", "https", "foo.com", "|https|foo.com"},
- {"http://foo.com", "http", "foo.com", "http://foo.com|http|"},
- {"http://foo.com", "https", "foo.com", "http://foo.com|https|foo.com"},
-}
-
-func TestCacheKeys(t *testing.T) {
- for _, tt := range cacheKeysTests {
- var proxy *url.URL
- if tt.proxy != "" {
- u, err := url.Parse(tt.proxy)
- if err != nil {
- t.Fatal(err)
- }
- proxy = u
- }
- cm := connectMethod{proxy, tt.scheme, tt.addr}
- if cm.String() != tt.key {
- t.Fatalf("{%q, %q, %q} cache key %q; want %q", tt.proxy, tt.scheme, tt.addr, cm.String(), tt.key)
- }
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/range_test.go b/gcc-4.8.1/libgo/go/net/http/range_test.go
deleted file mode 100644
index ef911af7b..000000000
--- a/gcc-4.8.1/libgo/go/net/http/range_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "testing"
-)
-
-var ParseRangeTests = []struct {
- s string
- length int64
- r []httpRange
-}{
- {"", 0, nil},
- {"", 1000, nil},
- {"foo", 0, nil},
- {"bytes=", 0, nil},
- {"bytes=7", 10, nil},
- {"bytes= 7 ", 10, nil},
- {"bytes=1-", 0, nil},
- {"bytes=5-4", 10, nil},
- {"bytes=0-2,5-4", 10, nil},
- {"bytes=2-5,4-3", 10, nil},
- {"bytes=--5,4--3", 10, nil},
- {"bytes=A-", 10, nil},
- {"bytes=A- ", 10, nil},
- {"bytes=A-Z", 10, nil},
- {"bytes= -Z", 10, nil},
- {"bytes=5-Z", 10, nil},
- {"bytes=Ran-dom, garbage", 10, nil},
- {"bytes=0x01-0x02", 10, nil},
- {"bytes= ", 10, nil},
- {"bytes= , , , ", 10, nil},
-
- {"bytes=0-9", 10, []httpRange{{0, 10}}},
- {"bytes=0-", 10, []httpRange{{0, 10}}},
- {"bytes=5-", 10, []httpRange{{5, 5}}},
- {"bytes=0-20", 10, []httpRange{{0, 10}}},
- {"bytes=15-,0-5", 10, nil},
- {"bytes=1-2,5-", 10, []httpRange{{1, 2}, {5, 5}}},
- {"bytes=-2 , 7-", 11, []httpRange{{9, 2}, {7, 4}}},
- {"bytes=0-0 ,2-2, 7-", 11, []httpRange{{0, 1}, {2, 1}, {7, 4}}},
- {"bytes=-5", 10, []httpRange{{5, 5}}},
- {"bytes=-15", 10, []httpRange{{0, 10}}},
- {"bytes=0-499", 10000, []httpRange{{0, 500}}},
- {"bytes=500-999", 10000, []httpRange{{500, 500}}},
- {"bytes=-500", 10000, []httpRange{{9500, 500}}},
- {"bytes=9500-", 10000, []httpRange{{9500, 500}}},
- {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}},
- {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}},
- {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}},
-
- // Match Apache laxity:
- {"bytes= 1 -2 , 4- 5, 7 - 8 , ,,", 11, []httpRange{{1, 2}, {4, 2}, {7, 2}}},
-}
-
-func TestParseRange(t *testing.T) {
- for _, test := range ParseRangeTests {
- r := test.r
- ranges, err := parseRange(test.s, test.length)
- if err != nil && r != nil {
- t.Errorf("parseRange(%q) returned error %q", test.s, err)
- }
- if len(ranges) != len(r) {
- t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r))
- continue
- }
- for i := range r {
- if ranges[i].start != r[i].start {
- t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start)
- }
- if ranges[i].length != r[i].length {
- t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length)
- }
- }
- }
-}
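
To make the {start, length} pairs above concrete: a suffix range such as "bytes=-500" against a 10000-byte file resolves to offset 9500 and length 500, which a file server can hand to an io.SectionReader. A small sketch (the parsed values below are taken from the table, since parseRange itself is unexported):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	content := strings.NewReader(strings.Repeat("x", 10000))
	start, length := int64(9500), int64(500) // parseRange("bytes=-500", 10000) per the table
	part := io.NewSectionReader(content, start, length)
	n, _ := io.Copy(ioutil.Discard, part)
	fmt.Printf("would serve %d bytes from offset %d\n", n, start) // 500 bytes from offset 9500
}
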
diff --git a/gcc-4.8.1/libgo/go/net/http/readrequest_test.go b/gcc-4.8.1/libgo/go/net/http/readrequest_test.go
deleted file mode 100644
index ffdd6a892..000000000
--- a/gcc-4.8.1/libgo/go/net/http/readrequest_test.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "net/url"
- "reflect"
- "testing"
-)
-
-type reqTest struct {
- Raw string
- Req *Request
- Body string
- Trailer Header
- Error string
-}
-
-var noError = ""
-var noBody = ""
-var noTrailer Header = nil
-
-var reqTests = []reqTest{
-	// Baseline test; all Request fields included for template use
- {
- "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
- "Host: www.techcrunch.com\r\n" +
- "User-Agent: Fake\r\n" +
- "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
- "Accept-Language: en-us,en;q=0.5\r\n" +
- "Accept-Encoding: gzip,deflate\r\n" +
- "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
- "Keep-Alive: 300\r\n" +
- "Content-Length: 7\r\n" +
- "Proxy-Connection: keep-alive\r\n\r\n" +
- "abcdef\n???",
-
- &Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.techcrunch.com",
- Path: "/",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{
- "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
- "Accept-Language": {"en-us,en;q=0.5"},
- "Accept-Encoding": {"gzip,deflate"},
- "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
- "Keep-Alive": {"300"},
- "Proxy-Connection": {"keep-alive"},
- "Content-Length": {"7"},
- "User-Agent": {"Fake"},
- },
- Close: false,
- ContentLength: 7,
- Host: "www.techcrunch.com",
- RequestURI: "http://www.techcrunch.com/",
- },
-
- "abcdef\n",
-
- noTrailer,
- noError,
- },
-
- // GET request with no body (the normal case)
- {
- "GET / HTTP/1.1\r\n" +
- "Host: foo.com\r\n\r\n",
-
- &Request{
- Method: "GET",
- URL: &url.URL{
- Path: "/",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: false,
- ContentLength: 0,
- Host: "foo.com",
- RequestURI: "/",
- },
-
- noBody,
- noTrailer,
- noError,
- },
-
- // Tests that we don't parse a path that looks like a
- // scheme-relative URI as a scheme-relative URI.
- {
- "GET //user@host/is/actually/a/path/ HTTP/1.1\r\n" +
- "Host: test\r\n\r\n",
-
- &Request{
- Method: "GET",
- URL: &url.URL{
- Path: "//user@host/is/actually/a/path/",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: false,
- ContentLength: 0,
- Host: "test",
- RequestURI: "//user@host/is/actually/a/path/",
- },
-
- noBody,
- noTrailer,
- noError,
- },
-
- // Tests a bogus abs_path on the Request-Line (RFC 2616 section 5.1.2)
- {
- "GET ../../../../etc/passwd HTTP/1.1\r\n" +
- "Host: test\r\n\r\n",
- nil,
- noBody,
- noTrailer,
- "parse ../../../../etc/passwd: invalid URI for request",
- },
-
- // Tests missing URL:
- {
- "GET HTTP/1.1\r\n" +
- "Host: test\r\n\r\n",
- nil,
- noBody,
- noTrailer,
- "parse : empty url",
- },
-
- // Tests chunked body with trailer:
- {
- "POST / HTTP/1.1\r\n" +
- "Host: foo.com\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- "3\r\nfoo\r\n" +
- "3\r\nbar\r\n" +
- "0\r\n" +
- "Trailer-Key: Trailer-Value\r\n" +
- "\r\n",
- &Request{
- Method: "POST",
- URL: &url.URL{
- Path: "/",
- },
- TransferEncoding: []string{"chunked"},
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- ContentLength: -1,
- Host: "foo.com",
- RequestURI: "/",
- },
-
- "foobar",
- Header{
- "Trailer-Key": {"Trailer-Value"},
- },
- noError,
- },
-
- // CONNECT request with domain name:
- {
- "CONNECT www.google.com:443 HTTP/1.1\r\n\r\n",
-
- &Request{
- Method: "CONNECT",
- URL: &url.URL{
- Host: "www.google.com:443",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: false,
- ContentLength: 0,
- Host: "www.google.com:443",
- RequestURI: "www.google.com:443",
- },
-
- noBody,
- noTrailer,
- noError,
- },
-
- // CONNECT request with IP address:
- {
- "CONNECT 127.0.0.1:6060 HTTP/1.1\r\n\r\n",
-
- &Request{
- Method: "CONNECT",
- URL: &url.URL{
- Host: "127.0.0.1:6060",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: false,
- ContentLength: 0,
- Host: "127.0.0.1:6060",
- RequestURI: "127.0.0.1:6060",
- },
-
- noBody,
- noTrailer,
- noError,
- },
-
- // CONNECT request for RPC:
- {
- "CONNECT /_goRPC_ HTTP/1.1\r\n\r\n",
-
- &Request{
- Method: "CONNECT",
- URL: &url.URL{
- Path: "/_goRPC_",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: false,
- ContentLength: 0,
- Host: "",
- RequestURI: "/_goRPC_",
- },
-
- noBody,
- noTrailer,
- noError,
- },
-
- // SSDP Notify request. golang.org/issue/3692
- {
- "NOTIFY * HTTP/1.1\r\nServer: foo\r\n\r\n",
- &Request{
- Method: "NOTIFY",
- URL: &url.URL{
- Path: "*",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{
- "Server": []string{"foo"},
- },
- Close: false,
- ContentLength: 0,
- RequestURI: "*",
- },
-
- noBody,
- noTrailer,
- noError,
- },
-
- // OPTIONS request. Similar to golang.org/issue/3692
- {
- "OPTIONS * HTTP/1.1\r\nServer: foo\r\n\r\n",
- &Request{
- Method: "OPTIONS",
- URL: &url.URL{
- Path: "*",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{
- "Server": []string{"foo"},
- },
- Close: false,
- ContentLength: 0,
- RequestURI: "*",
- },
-
- noBody,
- noTrailer,
- noError,
- },
-}
-
-func TestReadRequest(t *testing.T) {
- for i := range reqTests {
- tt := &reqTests[i]
- var braw bytes.Buffer
- braw.WriteString(tt.Raw)
- req, err := ReadRequest(bufio.NewReader(&braw))
- if err != nil {
- if err.Error() != tt.Error {
- t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error)
- }
- continue
- }
- rbody := req.Body
- req.Body = nil
- diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req)
- var bout bytes.Buffer
- if rbody != nil {
- _, err := io.Copy(&bout, rbody)
- if err != nil {
- t.Fatalf("#%d. copying body: %v", i, err)
- }
- rbody.Close()
- }
- body := bout.String()
- if body != tt.Body {
- t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
- }
- if !reflect.DeepEqual(tt.Trailer, req.Trailer) {
- t.Errorf("#%d. Trailers differ.\n got: %v\nwant: %v", i, req.Trailer, tt.Trailer)
- }
- }
-}
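
For reference, ReadRequest — the exported entry point these cases drive — parses a wire-format request from a bufio.Reader. A minimal sketch:

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	raw := "GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n"
	req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(raw)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.Path, req.Host) // GET /index.html example.com
}
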
diff --git a/gcc-4.8.1/libgo/go/net/http/request.go b/gcc-4.8.1/libgo/go/net/http/request.go
deleted file mode 100644
index 217f35b48..000000000
--- a/gcc-4.8.1/libgo/go/net/http/request.go
+++ /dev/null
@@ -1,814 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP Request reading and parsing.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "mime/multipart"
- "net/textproto"
- "net/url"
- "strconv"
- "strings"
-)
-
-const (
- maxValueLength = 4096
- maxHeaderLines = 1024
- chunkSize = 4 << 10 // 4 KB chunks
- defaultMaxMemory = 32 << 20 // 32 MB
-)
-
-// ErrMissingFile is returned by FormFile when the provided file field name
-// is either not present in the request or not a file field.
-var ErrMissingFile = errors.New("http: no such file")
-
-// HTTP request parsing errors.
-type ProtocolError struct {
- ErrorString string
-}
-
-func (err *ProtocolError) Error() string { return err.ErrorString }
-
-var (
- ErrHeaderTooLong = &ProtocolError{"header too long"}
- ErrShortBody = &ProtocolError{"entity body too short"}
- ErrNotSupported = &ProtocolError{"feature not supported"}
- ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"}
- ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
- ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"}
- ErrMissingBoundary = &ProtocolError{"no multipart boundary param Content-Type"}
-)
-
-type badStringError struct {
- what string
- str string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
-// Headers that Request.Write handles itself and should be skipped.
-var reqWriteExcludeHeader = map[string]bool{
- "Host": true, // not in Header map anyway
- "User-Agent": true,
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-// A Request represents an HTTP request received by a server
-// or to be sent by a client.
-type Request struct {
- Method string // GET, POST, PUT, etc.
-
- // URL is created from the URI supplied on the Request-Line
- // as stored in RequestURI.
- //
- // For most requests, fields other than Path and RawQuery
- // will be empty. (See RFC 2616, Section 5.1.2)
- URL *url.URL
-
- // The protocol version for incoming requests.
- // Outgoing requests always use HTTP/1.1.
- Proto string // "HTTP/1.0"
- ProtoMajor int // 1
- ProtoMinor int // 0
-
-	// Header maps the request header keys to their values.
- // If the header says
- //
- // accept-encoding: gzip, deflate
- // Accept-Language: en-us
- // Connection: keep-alive
- //
- // then
- //
- // Header = map[string][]string{
- // "Accept-Encoding": {"gzip, deflate"},
- // "Accept-Language": {"en-us"},
- // "Connection": {"keep-alive"},
- // }
- //
- // HTTP defines that header names are case-insensitive.
- // The request parser implements this by canonicalizing the
- // name, making the first character and any characters
- // following a hyphen uppercase and the rest lowercase.
- Header Header
-
- // The message body.
- Body io.ReadCloser
-
- // ContentLength records the length of the associated content.
- // The value -1 indicates that the length is unknown.
- // Values >= 0 indicate that the given number of bytes may
- // be read from Body.
- // For outgoing requests, a value of 0 means unknown if Body is not nil.
- ContentLength int64
-
- // TransferEncoding lists the transfer encodings from outermost to
- // innermost. An empty list denotes the "identity" encoding.
- // TransferEncoding can usually be ignored; chunked encoding is
- // automatically added and removed as necessary when sending and
- // receiving requests.
- TransferEncoding []string
-
- // Close indicates whether to close the connection after
- // replying to this request.
- Close bool
-
- // The host on which the URL is sought.
- // Per RFC 2616, this is either the value of the Host: header
- // or the host name given in the URL itself.
- // It may be of the form "host:port".
- Host string
-
- // Form contains the parsed form data, including both the URL
- // field's query parameters and the POST or PUT form data.
- // This field is only available after ParseForm is called.
- // The HTTP client ignores Form and uses Body instead.
- Form url.Values
-
- // PostForm contains the parsed form data from POST or PUT
- // body parameters.
- // This field is only available after ParseForm is called.
- // The HTTP client ignores PostForm and uses Body instead.
- PostForm url.Values
-
- // MultipartForm is the parsed multipart form, including file uploads.
- // This field is only available after ParseMultipartForm is called.
- // The HTTP client ignores MultipartForm and uses Body instead.
- MultipartForm *multipart.Form
-
- // Trailer maps trailer keys to values. Like for Header, if the
- // response has multiple trailer lines with the same key, they will be
- // concatenated, delimited by commas.
- // For server requests, Trailer is only populated after Body has been
- // closed or fully consumed.
- // Trailer support is only partially complete.
- Trailer Header
-
- // RemoteAddr allows HTTP servers and other software to record
- // the network address that sent the request, usually for
- // logging. This field is not filled in by ReadRequest and
- // has no defined format. The HTTP server in this package
- // sets RemoteAddr to an "IP:port" address before invoking a
- // handler.
- // This field is ignored by the HTTP client.
- RemoteAddr string
-
- // RequestURI is the unmodified Request-URI of the
- // Request-Line (RFC 2616, Section 5.1) as sent by the client
- // to a server. Usually the URL field should be used instead.
- // It is an error to set this field in an HTTP client request.
- RequestURI string
-
- // TLS allows HTTP servers and other software to record
- // information about the TLS connection on which the request
- // was received. This field is not filled in by ReadRequest.
- // The HTTP server in this package sets the field for
- // TLS-enabled connections before invoking a handler;
- // otherwise it leaves the field nil.
- // This field is ignored by the HTTP client.
- TLS *tls.ConnectionState
-}
-
-// ProtoAtLeast returns whether the HTTP protocol used
-// in the request is at least major.minor.
-func (r *Request) ProtoAtLeast(major, minor int) bool {
- return r.ProtoMajor > major ||
- r.ProtoMajor == major && r.ProtoMinor >= minor
-}
-
-// UserAgent returns the client's User-Agent, if sent in the request.
-func (r *Request) UserAgent() string {
- return r.Header.Get("User-Agent")
-}
-
-// Cookies parses and returns the HTTP cookies sent with the request.
-func (r *Request) Cookies() []*Cookie {
- return readCookies(r.Header, "")
-}
-
-var ErrNoCookie = errors.New("http: named cookie not present")
-
-// Cookie returns the named cookie provided in the request or
-// ErrNoCookie if not found.
-func (r *Request) Cookie(name string) (*Cookie, error) {
- for _, c := range readCookies(r.Header, name) {
- return c, nil
- }
- return nil, ErrNoCookie
-}
-
-// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
-// AddCookie does not attach more than one Cookie header field. That
-// means all cookies, if any, are written into the same line,
-// separated by semicolons.
-func (r *Request) AddCookie(c *Cookie) {
- s := fmt.Sprintf("%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
- if c := r.Header.Get("Cookie"); c != "" {
- r.Header.Set("Cookie", c+"; "+s)
- } else {
- r.Header.Set("Cookie", s)
- }
-}
-
-// Referer returns the referring URL, if sent in the request.
-//
-// Referer is misspelled as in the request itself, a mistake from the
-// earliest days of HTTP. This value can also be fetched from the
-// Header map as Header["Referer"]; the benefit of making it available
-// as a method is that the compiler can diagnose programs that use the
-// alternate (correct English) spelling req.Referrer() but cannot
-// diagnose programs that use Header["Referrer"].
-func (r *Request) Referer() string {
- return r.Header.Get("Referer")
-}
-
-// multipartByReader is a sentinel value.
-// Its presence in Request.MultipartForm indicates that parsing of the request
-// body has been handed off to a MultipartReader instead of ParseMultipartForm.
-var multipartByReader = &multipart.Form{
- Value: make(map[string][]string),
- File: make(map[string][]*multipart.FileHeader),
-}
-
-// MultipartReader returns a MIME multipart reader if this is a
-// multipart/form-data POST request, else returns nil and an error.
-// Use this function instead of ParseMultipartForm to
-// process the request body as a stream.
-func (r *Request) MultipartReader() (*multipart.Reader, error) {
- if r.MultipartForm == multipartByReader {
- return nil, errors.New("http: MultipartReader called twice")
- }
- if r.MultipartForm != nil {
- return nil, errors.New("http: multipart handled by ParseMultipartForm")
- }
- r.MultipartForm = multipartByReader
- return r.multipartReader()
-}
-
-func (r *Request) multipartReader() (*multipart.Reader, error) {
- v := r.Header.Get("Content-Type")
- if v == "" {
- return nil, ErrNotMultipart
- }
- d, params, err := mime.ParseMediaType(v)
- if err != nil || d != "multipart/form-data" {
- return nil, ErrNotMultipart
- }
- boundary, ok := params["boundary"]
- if !ok {
- return nil, ErrMissingBoundary
- }
- return multipart.NewReader(r.Body, boundary), nil
-}
-
-// Return value if nonempty, def otherwise.
-func valueOrDefault(value, def string) string {
- if value != "" {
- return value
- }
- return def
-}
-
-const defaultUserAgent = "Go http package"
-
-// Write writes an HTTP/1.1 request -- header and body -- in wire format.
-// This method consults the following fields of the request:
-// Host
-// URL
-// Method (defaults to "GET")
-// Header
-// ContentLength
-// TransferEncoding
-// Body
-//
-// If Body is present, Content-Length is <= 0 and TransferEncoding
-// hasn't been set to "identity", Write adds "Transfer-Encoding:
-// chunked" to the header. Body is closed after it is sent.
-func (r *Request) Write(w io.Writer) error {
- return r.write(w, false, nil)
-}
-
-// WriteProxy is like Write but writes the request in the form
-// expected by an HTTP proxy. In particular, WriteProxy writes the
-// initial Request-URI line of the request with an absolute URI, per
-// section 5.1.2 of RFC 2616, including the scheme and host.
-// In either case, WriteProxy also writes a Host header, using
-// either r.Host or r.URL.Host.
-func (r *Request) WriteProxy(w io.Writer) error {
- return r.write(w, true, nil)
-}
-
-// extraHeaders may be nil
-func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {
- host := req.Host
- if host == "" {
- if req.URL == nil {
- return errors.New("http: Request.Write on Request with no Host or URL set")
- }
- host = req.URL.Host
- }
-
- ruri := req.URL.RequestURI()
- if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" {
- ruri = req.URL.Scheme + "://" + host + ruri
- } else if req.Method == "CONNECT" && req.URL.Path == "" {
- // CONNECT requests normally give just the host and port, not a full URL.
- ruri = host
- }
- // TODO(bradfitz): escape at least newlines in ruri?
-
- // Wrap the writer in a bufio Writer if it's not already buffered.
- // Don't always call NewWriter, as that forces a bytes.Buffer
- // and other small bufio Writers to have a minimum 4k buffer
- // size.
- var bw *bufio.Writer
- if _, ok := w.(io.ByteWriter); !ok {
- bw = bufio.NewWriter(w)
- w = bw
- }
-
- fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri)
-
- // Header lines
- fmt.Fprintf(w, "Host: %s\r\n", host)
-
- // Use the defaultUserAgent unless the Header contains one, which
- // may be blank to not send the header.
- userAgent := defaultUserAgent
- if req.Header != nil {
- if ua := req.Header["User-Agent"]; len(ua) > 0 {
- userAgent = ua[0]
- }
- }
- if userAgent != "" {
- fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent)
- }
-
- // Process Body,ContentLength,Close,Trailer
- tw, err := newTransferWriter(req)
- if err != nil {
- return err
- }
- err = tw.WriteHeader(w)
- if err != nil {
- return err
- }
-
- // TODO: split long values? (If so, should share code with Conn.Write)
- err = req.Header.WriteSubset(w, reqWriteExcludeHeader)
- if err != nil {
- return err
- }
-
- if extraHeaders != nil {
- err = extraHeaders.Write(w)
- if err != nil {
- return err
- }
- }
-
- io.WriteString(w, "\r\n")
-
- // Write body and trailer
- err = tw.WriteBody(w)
- if err != nil {
- return err
- }
-
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// ParseHTTPVersion parses an HTTP version string.
-// "HTTP/1.0" returns (1, 0, true).
-func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
- const Big = 1000000 // arbitrary upper bound
- switch vers {
- case "HTTP/1.1":
- return 1, 1, true
- case "HTTP/1.0":
- return 1, 0, true
- }
- if !strings.HasPrefix(vers, "HTTP/") {
- return 0, 0, false
- }
- dot := strings.Index(vers, ".")
- if dot < 0 {
- return 0, 0, false
- }
- major, err := strconv.Atoi(vers[5:dot])
- if err != nil || major < 0 || major > Big {
- return 0, 0, false
- }
- minor, err = strconv.Atoi(vers[dot+1:])
- if err != nil || minor < 0 || minor > Big {
- return 0, 0, false
- }
- return major, minor, true
-}
-
-// NewRequest returns a new Request given a method, URL, and optional body.
-func NewRequest(method, urlStr string, body io.Reader) (*Request, error) {
- u, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = ioutil.NopCloser(body)
- }
- req := &Request{
- Method: method,
- URL: u,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(Header),
- Body: rc,
- Host: u.Host,
- }
- if body != nil {
- switch v := body.(type) {
- case *bytes.Buffer:
- req.ContentLength = int64(v.Len())
- case *bytes.Reader:
- req.ContentLength = int64(v.Len())
- case *strings.Reader:
- req.ContentLength = int64(v.Len())
- }
- }
-
- return req, nil
-}
-
-// SetBasicAuth sets the request's Authorization header to use HTTP
-// Basic Authentication with the provided username and password.
-//
-// With HTTP Basic Authentication the provided username and password
-// are not encrypted.
-func (r *Request) SetBasicAuth(username, password string) {
- s := username + ":" + password
- r.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(s)))
-}
-
-// ReadRequest reads and parses a request from b.
-func ReadRequest(b *bufio.Reader) (req *Request, err error) {
-
- tp := textproto.NewReader(b)
- req = new(Request)
-
- // First line: GET /index.html HTTP/1.0
- var s string
- if s, err = tp.ReadLine(); err != nil {
- return nil, err
- }
- defer func() {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- }()
-
- var f []string
- if f = strings.SplitN(s, " ", 3); len(f) < 3 {
- return nil, &badStringError{"malformed HTTP request", s}
- }
- req.Method, req.RequestURI, req.Proto = f[0], f[1], f[2]
- rawurl := req.RequestURI
- var ok bool
- if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
- return nil, &badStringError{"malformed HTTP version", req.Proto}
- }
-
- // CONNECT requests are used two different ways, and neither uses a full URL:
- // The standard use is to tunnel HTTPS through an HTTP proxy.
- // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is
- // just the authority section of a URL. This information should go in req.URL.Host.
- //
- // The net/rpc package also uses CONNECT, but there the parameter is a path
- // that starts with a slash. It can be parsed with the regular URL parser,
- // and the path will end up in req.URL.Path, where it needs to be in order for
- // RPC to work.
- justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/")
- if justAuthority {
- rawurl = "http://" + rawurl
- }
-
- if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
- return nil, err
- }
-
- if justAuthority {
- // Strip the bogus "http://" back off.
- req.URL.Scheme = ""
- }
-
- // Subsequent lines: Key: value.
- mimeHeader, err := tp.ReadMIMEHeader()
- if err != nil {
- return nil, err
- }
- req.Header = Header(mimeHeader)
-
- // RFC2616: Must treat
- // GET /index.html HTTP/1.1
- // Host: www.google.com
- // and
- // GET http://www.google.com/index.html HTTP/1.1
- // Host: doesntmatter
- // the same. In the second case, any Host line is ignored.
- req.Host = req.URL.Host
- if req.Host == "" {
- req.Host = req.Header.get("Host")
- }
- delete(req.Header, "Host")
-
- fixPragmaCacheControl(req.Header)
-
- // TODO: Parse specific header values:
- // Accept
- // Accept-Encoding
- // Accept-Language
- // Authorization
- // Cache-Control
- // Connection
- // Date
- // Expect
- // From
- // If-Match
- // If-Modified-Since
- // If-None-Match
- // If-Range
- // If-Unmodified-Since
- // Max-Forwards
- // Proxy-Authorization
- // Referer [sic]
- // TE (transfer-codings)
- // Trailer
- // Transfer-Encoding
- // Upgrade
- // User-Agent
- // Via
- // Warning
-
- err = readTransfer(req, b)
- if err != nil {
- return nil, err
- }
-
- return req, nil
-}
-
-// MaxBytesReader is similar to io.LimitReader but is intended for
-// limiting the size of incoming request bodies. In contrast to
-// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
-// non-EOF error for a Read beyond the limit, and Closes the
-// underlying reader when its Close method is called.
-//
-// MaxBytesReader prevents clients from accidentally or maliciously
-// sending a large request and wasting server resources.
-func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
- return &maxBytesReader{w: w, r: r, n: n}
-}
-
-type maxBytesReader struct {
- w ResponseWriter
- r io.ReadCloser // underlying reader
- n int64 // max bytes remaining
- stopped bool
-}
-
-func (l *maxBytesReader) Read(p []byte) (n int, err error) {
- if l.n <= 0 {
- if !l.stopped {
- l.stopped = true
- if res, ok := l.w.(*response); ok {
- res.requestTooLarge()
- }
- }
- return 0, errors.New("http: request body too large")
- }
- if int64(len(p)) > l.n {
- p = p[:l.n]
- }
- n, err = l.r.Read(p)
- l.n -= int64(n)
- return
-}
-
-func (l *maxBytesReader) Close() error {
- return l.r.Close()
-}
-
-func copyValues(dst, src url.Values) {
- for k, vs := range src {
- for _, value := range vs {
- dst.Add(k, value)
- }
- }
-}
-
-func parsePostForm(r *Request) (vs url.Values, err error) {
- if r.Body == nil {
- err = errors.New("missing form body")
- return
- }
- ct := r.Header.Get("Content-Type")
- ct, _, err = mime.ParseMediaType(ct)
- switch {
- case ct == "application/x-www-form-urlencoded":
- var reader io.Reader = r.Body
- maxFormSize := int64(1<<63 - 1)
- if _, ok := r.Body.(*maxBytesReader); !ok {
- maxFormSize = int64(10 << 20) // 10 MB is a lot of text.
- reader = io.LimitReader(r.Body, maxFormSize+1)
- }
- b, e := ioutil.ReadAll(reader)
- if e != nil {
- if err == nil {
- err = e
- }
- break
- }
- if int64(len(b)) > maxFormSize {
- err = errors.New("http: POST too large")
- return
- }
- vs, e = url.ParseQuery(string(b))
- if err == nil {
- err = e
- }
- case ct == "multipart/form-data":
- // handled by ParseMultipartForm (which is calling us, or should be)
- // TODO(bradfitz): there are too many possible
- // orders to call too many functions here.
- // Clean this up and write more tests.
- // request_test.go contains the start of this,
- // in TestRequestMultipartCallOrder.
- }
- return
-}
-
-// ParseForm parses the raw query from the URL and updates r.Form.
-//
-// For POST or PUT requests, it also parses the request body as a form and
-// puts the results into both r.PostForm and r.Form.
-// POST and PUT body parameters take precedence over URL query string values
-// in r.Form.
-//
-// If the request Body's size has not already been limited by MaxBytesReader,
-// the size is capped at 10MB.
-//
-// ParseMultipartForm calls ParseForm automatically.
-// It is idempotent.
-func (r *Request) ParseForm() error {
- var err error
- if r.PostForm == nil {
- if r.Method == "POST" || r.Method == "PUT" {
- r.PostForm, err = parsePostForm(r)
- }
- if r.PostForm == nil {
- r.PostForm = make(url.Values)
- }
- }
- if r.Form == nil {
- if len(r.PostForm) > 0 {
- r.Form = make(url.Values)
- copyValues(r.Form, r.PostForm)
- }
- var newValues url.Values
- if r.URL != nil {
- var e error
- newValues, e = url.ParseQuery(r.URL.RawQuery)
- if err == nil {
- err = e
- }
- }
- if newValues == nil {
- newValues = make(url.Values)
- }
- if r.Form == nil {
- r.Form = newValues
- } else {
- copyValues(r.Form, newValues)
- }
- }
- return err
-}
-
-// ParseMultipartForm parses a request body as multipart/form-data.
-// The whole request body is parsed and up to a total of maxMemory bytes of
-// its file parts are stored in memory, with the remainder stored on
-// disk in temporary files.
-// ParseMultipartForm calls ParseForm if necessary.
-// After one call to ParseMultipartForm, subsequent calls have no effect.
-func (r *Request) ParseMultipartForm(maxMemory int64) error {
- if r.MultipartForm == multipartByReader {
- return errors.New("http: multipart handled by MultipartReader")
- }
- if r.Form == nil {
- err := r.ParseForm()
- if err != nil {
- return err
- }
- }
- if r.MultipartForm != nil {
- return nil
- }
-
- mr, err := r.multipartReader()
- if err == ErrNotMultipart {
- return nil
- } else if err != nil {
- return err
- }
-
- f, err := mr.ReadForm(maxMemory)
- if err != nil {
- return err
- }
- for k, v := range f.Value {
- r.Form[k] = append(r.Form[k], v...)
- }
- r.MultipartForm = f
-
- return nil
-}
-
-// FormValue returns the first value for the named component of the query.
-// POST and PUT body parameters take precedence over URL query string values.
-// FormValue calls ParseMultipartForm and ParseForm if necessary.
-// To access multiple values of the same key use ParseForm.
-func (r *Request) FormValue(key string) string {
- if r.Form == nil {
- r.ParseMultipartForm(defaultMaxMemory)
- }
- if vs := r.Form[key]; len(vs) > 0 {
- return vs[0]
- }
- return ""
-}
-
-// PostFormValue returns the first value for the named component of the POST
-// or PUT request body. URL query parameters are ignored.
-// PostFormValue calls ParseMultipartForm and ParseForm if necessary.
-func (r *Request) PostFormValue(key string) string {
- if r.PostForm == nil {
- r.ParseMultipartForm(defaultMaxMemory)
- }
- if vs := r.PostForm[key]; len(vs) > 0 {
- return vs[0]
- }
- return ""
-}
-
-// FormFile returns the first file for the provided form key.
-// FormFile calls ParseMultipartForm and ParseForm if necessary.
-func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
- if r.MultipartForm == multipartByReader {
- return nil, nil, errors.New("http: multipart handled by MultipartReader")
- }
- if r.MultipartForm == nil {
- err := r.ParseMultipartForm(defaultMaxMemory)
- if err != nil {
- return nil, nil, err
- }
- }
- if r.MultipartForm != nil && r.MultipartForm.File != nil {
- if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
- f, err := fhs[0].Open()
- return f, fhs[0], err
- }
- }
- return nil, nil, ErrMissingFile
-}
-
-func (r *Request) expectsContinue() bool {
- return hasToken(r.Header.get("Expect"), "100-continue")
-}
-
-func (r *Request) wantsHttp10KeepAlive() bool {
- if r.ProtoMajor != 1 || r.ProtoMinor != 0 {
- return false
- }
- return hasToken(r.Header.get("Connection"), "keep-alive")
-}
-
-func (r *Request) wantsClose() bool {
- return hasToken(r.Header.get("Connection"), "close")
-}
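
A short client-side sketch of the helpers defined above (the host and credentials are illustrative): NewRequest infers ContentLength for *bytes.Buffer, *bytes.Reader and *strings.Reader bodies, SetBasicAuth fills the Authorization header, AddCookie appends to a single Cookie header, and Write emits the wire format.

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
)

func main() {
	req, err := http.NewRequest("POST", "http://example.com/login", strings.NewReader("user=gopher"))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.SetBasicAuth("gopher", "secret")
	req.AddCookie(&http.Cookie{Name: "session", Value: "abc123"})
	fmt.Println(req.ContentLength) // 11, detected from the *strings.Reader

	var buf bytes.Buffer
	if err := req.Write(&buf); err != nil { // serializes header and body; closes the body
		log.Fatal(err)
	}
	os.Stdout.Write(buf.Bytes())
}
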
diff --git a/gcc-4.8.1/libgo/go/net/http/request_test.go b/gcc-4.8.1/libgo/go/net/http/request_test.go
deleted file mode 100644
index bd757920b..000000000
--- a/gcc-4.8.1/libgo/go/net/http/request_test.go
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "mime/multipart"
- . "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "reflect"
- "regexp"
- "strings"
- "testing"
-)
-
-func TestQuery(t *testing.T) {
- req := &Request{Method: "GET"}
- req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar")
- if q := req.FormValue("q"); q != "foo" {
- t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
- }
-}
-
-func TestPostQuery(t *testing.T) {
- req, _ := NewRequest("POST", "http://www.google.com/search?q=foo&q=bar&both=x&prio=1&empty=not",
- strings.NewReader("z=post&both=y&prio=2&empty="))
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value")
-
- if q := req.FormValue("q"); q != "foo" {
- t.Errorf(`req.FormValue("q") = %q, want "foo"`, q)
- }
- if z := req.FormValue("z"); z != "post" {
- t.Errorf(`req.FormValue("z") = %q, want "post"`, z)
- }
- if bq, found := req.PostForm["q"]; found {
- t.Errorf(`req.PostForm["q"] = %q, want no entry in map`, bq)
- }
- if bz := req.PostFormValue("z"); bz != "post" {
- t.Errorf(`req.PostFormValue("z") = %q, want "post"`, bz)
- }
- if qs := req.Form["q"]; !reflect.DeepEqual(qs, []string{"foo", "bar"}) {
- t.Errorf(`req.Form["q"] = %q, want ["foo", "bar"]`, qs)
- }
- if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"y", "x"}) {
- t.Errorf(`req.Form["both"] = %q, want ["y", "x"]`, both)
- }
- if prio := req.FormValue("prio"); prio != "2" {
- t.Errorf(`req.FormValue("prio") = %q, want "2" (from body)`, prio)
- }
- if empty := req.FormValue("empty"); empty != "" {
- t.Errorf(`req.FormValue("empty") = %q, want "" (from body)`, empty)
- }
-}
-
-type stringMap map[string][]string
-type parseContentTypeTest struct {
- shouldError bool
- contentType stringMap
-}
-
-var parseContentTypeTests = []parseContentTypeTest{
- {false, stringMap{"Content-Type": {"text/plain"}}},
-	// A key that is not present is simply absent from the map; a nil value is not allowed.
- {true, stringMap{}},
- {true, stringMap{"Content-Type": {"text/plain; boundary="}}},
- {false, stringMap{"Content-Type": {"application/unknown"}}},
-}
-
-func TestParseFormUnknownContentType(t *testing.T) {
- for i, test := range parseContentTypeTests {
- req := &Request{
- Method: "POST",
- Header: Header(test.contentType),
- Body: ioutil.NopCloser(bytes.NewBufferString("body")),
- }
- err := req.ParseForm()
- switch {
- case err == nil && test.shouldError:
- t.Errorf("test %d should have returned error", i)
- case err != nil && !test.shouldError:
- t.Errorf("test %d should not have returned error, got %v", i, err)
- }
- }
-}
-
-func TestParseFormInitializeOnError(t *testing.T) {
- nilBody, _ := NewRequest("POST", "http://www.google.com/search?q=foo", nil)
- tests := []*Request{
- nilBody,
- {Method: "GET", URL: nil},
- }
- for i, req := range tests {
- err := req.ParseForm()
- if req.Form == nil {
- t.Errorf("%d. Form not initialized, error %v", i, err)
- }
- if req.PostForm == nil {
- t.Errorf("%d. PostForm not initialized, error %v", i, err)
- }
- }
-}
-
-func TestMultipartReader(t *testing.T) {
- req := &Request{
- Method: "POST",
- Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}},
- Body: ioutil.NopCloser(new(bytes.Buffer)),
- }
- multipart, err := req.MultipartReader()
- if multipart == nil {
- t.Errorf("expected multipart; error: %v", err)
- }
-
- req.Header = Header{"Content-Type": {"text/plain"}}
- multipart, err = req.MultipartReader()
- if multipart != nil {
- t.Errorf("unexpected multipart for text/plain")
- }
-}
-
-func TestRedirect(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- switch r.URL.Path {
- case "/":
- w.Header().Set("Location", "/foo/")
- w.WriteHeader(StatusSeeOther)
- case "/foo/":
- fmt.Fprintf(w, "foo")
- default:
- w.WriteHeader(StatusBadRequest)
- }
- }))
- defer ts.Close()
-
- var end = regexp.MustCompile("/foo/$")
- r, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- r.Body.Close()
- url := r.Request.URL.String()
- if r.StatusCode != 200 || !end.MatchString(url) {
- t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url)
- }
-}
-
-func TestSetBasicAuth(t *testing.T) {
- r, _ := NewRequest("GET", "http://example.com/", nil)
- r.SetBasicAuth("Aladdin", "open sesame")
- if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e {
- t.Errorf("got header %q, want %q", g, e)
- }
-}
-
-func TestMultipartRequest(t *testing.T) {
- // Test that we can read the values and files of a
- // multipart request with FormValue and FormFile,
- // and that ParseMultipartForm can be called multiple times.
- req := newTestMultipartRequest(t)
- if err := req.ParseMultipartForm(25); err != nil {
- t.Fatal("ParseMultipartForm first call:", err)
- }
- defer req.MultipartForm.RemoveAll()
- validateTestMultipartContents(t, req, false)
- if err := req.ParseMultipartForm(25); err != nil {
- t.Fatal("ParseMultipartForm second call:", err)
- }
- validateTestMultipartContents(t, req, false)
-}
-
-func TestMultipartRequestAuto(t *testing.T) {
- // Test that FormValue and FormFile automatically invoke
- // ParseMultipartForm and return the right values.
- req := newTestMultipartRequest(t)
- defer func() {
- if req.MultipartForm != nil {
- req.MultipartForm.RemoveAll()
- }
- }()
- validateTestMultipartContents(t, req, true)
-}
-
-func TestEmptyMultipartRequest(t *testing.T) {
- // Test that FormValue and FormFile automatically invoke
- // ParseMultipartForm and return the right values.
- req, err := NewRequest("GET", "/", nil)
- if err != nil {
- t.Errorf("NewRequest err = %q", err)
- }
- testMissingFile(t, req)
-}
-
-func TestRequestMultipartCallOrder(t *testing.T) {
- req := newTestMultipartRequest(t)
- _, err := req.MultipartReader()
- if err != nil {
- t.Fatalf("MultipartReader: %v", err)
- }
- err = req.ParseMultipartForm(1024)
- if err == nil {
- t.Errorf("expected an error from ParseMultipartForm after call to MultipartReader")
- }
-}
-
-var readRequestErrorTests = []struct {
- in string
- err error
-}{
- {"GET / HTTP/1.1\r\nheader:foo\r\n\r\n", nil},
- {"GET / HTTP/1.1\r\nheader:foo\r\n", io.ErrUnexpectedEOF},
- {"", io.EOF},
-}
-
-func TestReadRequestErrors(t *testing.T) {
- for i, tt := range readRequestErrorTests {
- _, err := ReadRequest(bufio.NewReader(strings.NewReader(tt.in)))
- if err != tt.err {
- t.Errorf("%d. got error = %v; want %v", i, err, tt.err)
- }
- }
-}
-
-func TestNewRequestHost(t *testing.T) {
- req, err := NewRequest("GET", "http://localhost:1234/", nil)
- if err != nil {
- t.Fatal(err)
- }
- if req.Host != "localhost:1234" {
- t.Errorf("Host = %q; want localhost:1234", req.Host)
- }
-}
-
-func TestNewRequestContentLength(t *testing.T) {
- readByte := func(r io.Reader) io.Reader {
- var b [1]byte
- r.Read(b[:])
- return r
- }
- tests := []struct {
- r io.Reader
- want int64
- }{
- {bytes.NewReader([]byte("123")), 3},
- {bytes.NewBuffer([]byte("1234")), 4},
- {strings.NewReader("12345"), 5},
- // Not detected:
- {struct{ io.Reader }{strings.NewReader("xyz")}, 0},
- {io.NewSectionReader(strings.NewReader("x"), 0, 6), 0},
- {readByte(io.NewSectionReader(strings.NewReader("xy"), 0, 6)), 0},
- }
- for _, tt := range tests {
- req, err := NewRequest("POST", "http://localhost/", tt.r)
- if err != nil {
- t.Fatal(err)
- }
- if req.ContentLength != tt.want {
- t.Errorf("ContentLength(%#T) = %d; want %d", tt.r, req.ContentLength, tt.want)
- }
- }
-}
-
-type logWrites struct {
- t *testing.T
- dst *[]string
-}
-
-func (l logWrites) WriteByte(c byte) error {
- l.t.Fatalf("unexpected WriteByte call")
- return nil
-}
-
-func (l logWrites) Write(p []byte) (n int, err error) {
- *l.dst = append(*l.dst, string(p))
- return len(p), nil
-}
-
-func TestRequestWriteBufferedWriter(t *testing.T) {
- got := []string{}
- req, _ := NewRequest("GET", "http://foo.com/", nil)
- req.Write(logWrites{t, &got})
- want := []string{
- "GET / HTTP/1.1\r\n",
- "Host: foo.com\r\n",
- "User-Agent: Go http package\r\n",
- "\r\n",
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Writes = %q\n Want = %q", got, want)
- }
-}
-
-func testMissingFile(t *testing.T, req *Request) {
- f, fh, err := req.FormFile("missing")
- if f != nil {
- t.Errorf("FormFile file = %q, want nil", f)
- }
- if fh != nil {
- t.Errorf("FormFile file header = %q, want nil", fh)
- }
- if err != ErrMissingFile {
- t.Errorf("FormFile err = %q, want ErrMissingFile", err)
- }
-}
-
-func newTestMultipartRequest(t *testing.T) *Request {
- b := bytes.NewBufferString(strings.Replace(message, "\n", "\r\n", -1))
- req, err := NewRequest("POST", "/", b)
- if err != nil {
- t.Fatal("NewRequest:", err)
- }
- ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary)
- req.Header.Set("Content-type", ctype)
- return req
-}
-
-func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) {
- if g, e := req.FormValue("texta"), textaValue; g != e {
- t.Errorf("texta value = %q, want %q", g, e)
- }
- if g, e := req.FormValue("textb"), textbValue; g != e {
- t.Errorf("textb value = %q, want %q", g, e)
- }
- if g := req.FormValue("missing"); g != "" {
- t.Errorf("missing value = %q, want empty string", g)
- }
-
- assertMem := func(n string, fd multipart.File) {
- if _, ok := fd.(*os.File); ok {
- t.Error(n, " is *os.File, should not be")
- }
- }
- fda := testMultipartFile(t, req, "filea", "filea.txt", fileaContents)
- defer fda.Close()
- assertMem("filea", fda)
- fdb := testMultipartFile(t, req, "fileb", "fileb.txt", filebContents)
- defer fdb.Close()
- if allMem {
- assertMem("fileb", fdb)
- } else {
- if _, ok := fdb.(*os.File); !ok {
- t.Errorf("fileb has unexpected underlying type %T", fdb)
- }
- }
-
- testMissingFile(t, req)
-}
-
-func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File {
- f, fh, err := req.FormFile(key)
- if err != nil {
- t.Fatalf("FormFile(%q): %q", key, err)
- }
- if fh.Filename != expectFilename {
- t.Errorf("filename = %q, want %q", fh.Filename, expectFilename)
- }
- var b bytes.Buffer
- _, err = io.Copy(&b, f)
- if err != nil {
- t.Fatal("copying contents:", err)
- }
- if g := b.String(); g != expectContent {
- t.Errorf("contents = %q, want %q", g, expectContent)
- }
- return f
-}
-
-const (
- fileaContents = "This is a test file."
- filebContents = "Another test file."
- textaValue = "foo"
- textbValue = "bar"
- boundary = `MyBoundary`
-)
-
-const message = `
---MyBoundary
-Content-Disposition: form-data; name="filea"; filename="filea.txt"
-Content-Type: text/plain
-
-` + fileaContents + `
---MyBoundary
-Content-Disposition: form-data; name="fileb"; filename="fileb.txt"
-Content-Type: text/plain
-
-` + filebContents + `
---MyBoundary
-Content-Disposition: form-data; name="texta"
-
-` + textaValue + `
---MyBoundary
-Content-Disposition: form-data; name="textb"
-
-` + textbValue + `
---MyBoundary--
-`
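
On the serving side, the multipart helpers exercised above are typically used as in the sketch below (the field names "avatar" and "name" are illustrative): ParseMultipartForm keeps up to maxMemory bytes of file parts in memory and spills the rest to temporary files, after which FormFile and FormValue read the parsed results.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func uploadHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseMultipartForm(32 << 20); err != nil { // keep up to 32 MB in memory
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	f, fh, err := r.FormFile("avatar")
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer f.Close()
	n, _ := io.Copy(ioutil.Discard, f)
	fmt.Fprintf(w, "got %q (%d bytes) for user %s\n", fh.Filename, n, r.FormValue("name"))
}

func main() {
	http.HandleFunc("/upload", uploadHandler)
	http.ListenAndServe(":8080", nil)
}
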
diff --git a/gcc-4.8.1/libgo/go/net/http/requestwrite_test.go b/gcc-4.8.1/libgo/go/net/http/requestwrite_test.go
deleted file mode 100644
index fc3186f0c..000000000
--- a/gcc-4.8.1/libgo/go/net/http/requestwrite_test.go
+++ /dev/null
@@ -1,438 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/url"
- "strings"
- "testing"
-)
-
-type reqWriteTest struct {
- Req Request
- Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
-
- // Any of these three may be empty to skip that test.
- WantWrite string // Request.Write
- WantProxy string // Request.WriteProxy
-
- WantError error // wanted error from Request.Write
-}
-
-var reqWriteTests = []reqWriteTest{
- // HTTP/1.1 => chunked coding; no body; no trailer
- {
- Req: Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.techcrunch.com",
- Path: "/",
- },
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{
- "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
- "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"},
- "Accept-Encoding": {"gzip,deflate"},
- "Accept-Language": {"en-us,en;q=0.5"},
- "Keep-Alive": {"300"},
- "Proxy-Connection": {"keep-alive"},
- "User-Agent": {"Fake"},
- },
- Body: nil,
- Close: false,
- Host: "www.techcrunch.com",
- Form: map[string][]string{},
- },
-
- WantWrite: "GET / HTTP/1.1\r\n" +
- "Host: www.techcrunch.com\r\n" +
- "User-Agent: Fake\r\n" +
- "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
- "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
- "Accept-Encoding: gzip,deflate\r\n" +
- "Accept-Language: en-us,en;q=0.5\r\n" +
- "Keep-Alive: 300\r\n" +
- "Proxy-Connection: keep-alive\r\n\r\n",
-
- WantProxy: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
- "Host: www.techcrunch.com\r\n" +
- "User-Agent: Fake\r\n" +
- "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
- "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
- "Accept-Encoding: gzip,deflate\r\n" +
- "Accept-Language: en-us,en;q=0.5\r\n" +
- "Keep-Alive: 300\r\n" +
- "Proxy-Connection: keep-alive\r\n\r\n",
- },
- // HTTP/1.1 => chunked coding; body; empty trailer
- {
- Req: Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.google.com",
- Path: "/search",
- },
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- TransferEncoding: []string{"chunked"},
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "GET /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
-
- WantProxy: "GET http://www.google.com/search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
- },
- // HTTP/1.1 POST => chunked coding; body; empty trailer
- {
- Req: Request{
- Method: "POST",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.google.com",
- Path: "/search",
- },
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: true,
- TransferEncoding: []string{"chunked"},
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "POST /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
-
- WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("abcdef") + chunk(""),
- },
-
- // HTTP/1.1 POST with Content-Length, no chunking
- {
- Req: Request{
- Method: "POST",
- URL: &url.URL{
- Scheme: "http",
- Host: "www.google.com",
- Path: "/search",
- },
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Close: true,
- ContentLength: 6,
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "POST /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
-
- WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Connection: close\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
- },
-
- // HTTP/1.1 POST with Content-Length in headers
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("http://example.com/"),
- Host: "example.com",
- Header: Header{
- "Content-Length": []string{"10"}, // ignored
- },
- ContentLength: 6,
- },
-
- Body: []byte("abcdef"),
-
- WantWrite: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
-
- WantProxy: "POST http://example.com/ HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 6\r\n" +
- "\r\n" +
- "abcdef",
- },
-
- // default to HTTP/1.1
- {
- Req: Request{
- Method: "GET",
- URL: mustParseURL("/search"),
- Host: "www.google.com",
- },
-
- WantWrite: "GET /search HTTP/1.1\r\n" +
- "Host: www.google.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "\r\n",
- },
-
- // Request with a 0 ContentLength and a 0 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 0, // as if unset by user
- },
-
- Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 0)) },
-
- // RFC 2616 Section 14.13 says Content-Length should be specified
- // unless body is prohibited by the request method.
- // Also, nginx expects it for POST and PUT.
- WantWrite: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 0\r\n" +
- "\r\n",
-
- WantProxy: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Content-Length: 0\r\n" +
- "\r\n",
- },
-
- // Request with a 0 ContentLength and a 1 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 0, // as if unset by user
- },
-
- Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 1)) },
-
- WantWrite: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("x") + chunk(""),
-
- WantProxy: "POST / HTTP/1.1\r\n" +
- "Host: example.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- chunk("x") + chunk(""),
- },
-
- // Request with a ContentLength of 10 but a 5 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 10, // but we're going to send only 5 bytes
- },
- Body: []byte("12345"),
- WantError: errors.New("http: Request.ContentLength=10 with Body length 5"),
- },
-
- // Request with a ContentLength of 4 but an 8 byte body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 4, // but we're going to try to send 8 bytes
- },
- Body: []byte("12345678"),
- WantError: errors.New("http: Request.ContentLength=4 with Body length 8"),
- },
-
- // Request with a 5 ContentLength and nil body.
- {
- Req: Request{
- Method: "POST",
- URL: mustParseURL("/"),
- Host: "example.com",
- ProtoMajor: 1,
- ProtoMinor: 1,
- ContentLength: 5, // but we'll omit the body
- },
- WantError: errors.New("http: Request.ContentLength=5 with nil Body"),
- },
-
- // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
- // and doesn't add a User-Agent.
- {
- Req: Request{
- Method: "GET",
- URL: mustParseURL("/foo"),
- ProtoMajor: 1,
- ProtoMinor: 0,
- Header: Header{
- "X-Foo": []string{"X-Bar"},
- },
- },
-
- WantWrite: "GET /foo HTTP/1.1\r\n" +
- "Host: \r\n" +
- "User-Agent: Go http package\r\n" +
- "X-Foo: X-Bar\r\n\r\n",
- },
-}
-
-func TestRequestWrite(t *testing.T) {
- for i := range reqWriteTests {
- tt := &reqWriteTests[i]
-
- setBody := func() {
- if tt.Body == nil {
- return
- }
- switch b := tt.Body.(type) {
- case []byte:
- tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
- case func() io.ReadCloser:
- tt.Req.Body = b()
- }
- }
- setBody()
- if tt.Req.Header == nil {
- tt.Req.Header = make(Header)
- }
-
- var braw bytes.Buffer
- err := tt.Req.Write(&braw)
- if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.WantError); g != e {
- t.Errorf("writing #%d, err = %q, want %q", i, g, e)
- continue
- }
- if err != nil {
- continue
- }
-
- if tt.WantWrite != "" {
- sraw := braw.String()
- if sraw != tt.WantWrite {
- t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantWrite, sraw)
- continue
- }
- }
-
- if tt.WantProxy != "" {
- setBody()
- var praw bytes.Buffer
- err = tt.Req.WriteProxy(&praw)
- if err != nil {
- t.Errorf("WriteProxy #%d: %s", i, err)
- continue
- }
- sraw := praw.String()
- if sraw != tt.WantProxy {
- t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantProxy, sraw)
- continue
- }
- }
- }
-}
-
-type closeChecker struct {
- io.Reader
- closed bool
-}
-
-func (rc *closeChecker) Close() error {
- rc.closed = true
- return nil
-}
-
-// TestRequestWriteClosesBody tests that Request.Write does close its request.Body.
-// It also indirectly tests NewRequest and that it doesn't wrap an existing Closer
-// inside a NopCloser, and that it serializes it correctly.
-func TestRequestWriteClosesBody(t *testing.T) {
- rc := &closeChecker{Reader: strings.NewReader("my body")}
- req, _ := NewRequest("POST", "http://foo.com/", rc)
- if req.ContentLength != 0 {
- t.Errorf("got req.ContentLength %d, want 0", req.ContentLength)
- }
- buf := new(bytes.Buffer)
- req.Write(buf)
- if !rc.closed {
- t.Error("body not closed after write")
- }
- expected := "POST / HTTP/1.1\r\n" +
- "Host: foo.com\r\n" +
- "User-Agent: Go http package\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- // TODO: currently we don't buffer before chunking, so we get a
- // single "m" chunk before the other chunks, as this was the 1-byte
-	// read from our MultiReader where we stitched the Body back together
- // after sniffing whether the Body was 0 bytes or not.
- chunk("m") +
- chunk("y body") +
- chunk("")
- if buf.String() != expected {
- t.Errorf("write:\n got: %s\nwant: %s", buf.String(), expected)
- }
-}
-
-func chunk(s string) string {
- return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
-}
-
-func mustParseURL(s string) *url.URL {
- u, err := url.Parse(s)
- if err != nil {
- panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
- }
- return u
-}
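
The chunk helper above produces one HTTP/1.1 chunk, "<length in hex>\r\n<data>\r\n", with chunk("") as the terminator, which is why the chunked expectations in the table end in chunk(""). A quick illustration:

package main

import "fmt"

func chunk(s string) string { // same shape as the test helper above
	return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
}

func main() {
	fmt.Printf("%q\n", chunk("abcdef")+chunk("")) // "6\r\nabcdef\r\n0\r\n\r\n"
}
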
diff --git a/gcc-4.8.1/libgo/go/net/http/response.go b/gcc-4.8.1/libgo/go/net/http/response.go
deleted file mode 100644
index 7901c49f5..000000000
--- a/gcc-4.8.1/libgo/go/net/http/response.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP Response reading and parsing.
-
-package http
-
-import (
- "bufio"
- "errors"
- "io"
- "net/textproto"
- "net/url"
- "strconv"
- "strings"
-)
-
-var respExcludeHeader = map[string]bool{
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-// Response represents the response from an HTTP request.
-//
-type Response struct {
- Status string // e.g. "200 OK"
- StatusCode int // e.g. 200
- Proto string // e.g. "HTTP/1.0"
- ProtoMajor int // e.g. 1
- ProtoMinor int // e.g. 0
-
- // Header maps header keys to values. If the response had multiple
- // headers with the same key, they will be concatenated, with comma
- // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers
- // be semantically equivalent to a comma-delimited sequence.) Values
- // duplicated by other fields in this struct (e.g., ContentLength) are
- // omitted from Header.
- //
- // Keys in the map are canonicalized (see CanonicalHeaderKey).
- Header Header
-
- // Body represents the response body.
- //
- // The http Client and Transport guarantee that Body is always
- // non-nil, even on responses without a body or responses with
-	// a zero-length body.
- Body io.ReadCloser
-
- // ContentLength records the length of the associated content. The
- // value -1 indicates that the length is unknown. Unless Request.Method
- // is "HEAD", values >= 0 indicate that the given number of bytes may
- // be read from Body.
- ContentLength int64
-
-	// Contains transfer encodings from outer-most to inner-most. A nil
-	// value means that the "identity" encoding is used.
- TransferEncoding []string
-
- // Close records whether the header directed that the connection be
- // closed after reading Body. The value is advice for clients: neither
- // ReadResponse nor Response.Write ever closes a connection.
- Close bool
-
- // Trailer maps trailer keys to values, in the same
- // format as the header.
- Trailer Header
-
- // The Request that was sent to obtain this Response.
- // Request's Body is nil (having already been consumed).
- // This is only populated for Client requests.
- Request *Request
-}
-
-// Cookies parses and returns the cookies set in the Set-Cookie headers.
-func (r *Response) Cookies() []*Cookie {
- return readSetCookies(r.Header)
-}
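
As a quick illustration of the accessor above, the sketch below builds a Response by hand and iterates over its parsed cookies; the Set-Cookie value is invented purely for the example.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// The cookie string here is hypothetical; normally the Header comes
	// from a parsed response.
	res := &http.Response{
		Header: http.Header{"Set-Cookie": {"session=abc123; Path=/"}},
	}
	for _, c := range res.Cookies() {
		fmt.Printf("%s=%s\n", c.Name, c.Value) // prints "session=abc123"
	}
}
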
-
-var ErrNoLocation = errors.New("http: no Location header in response")
-
-// Location returns the URL of the response's "Location" header,
-// if present. Relative redirects are resolved relative to
-// the Response's Request. ErrNoLocation is returned if no
-// Location header is present.
-func (r *Response) Location() (*url.URL, error) {
- lv := r.Header.Get("Location")
- if lv == "" {
- return nil, ErrNoLocation
- }
- if r.Request != nil && r.Request.URL != nil {
- return r.Request.URL.Parse(lv)
- }
- return url.Parse(lv)
-}
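
A small sketch of the resolution rule described above; the URLs are made up and mirror the cases in response_test.go further down.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	reqURL, err := url.Parse("http://bar.com/baz") // illustrative URL
	if err != nil {
		panic(err)
	}
	res := &http.Response{
		Header:  http.Header{"Location": {"/foo"}},
		Request: &http.Request{URL: reqURL},
	}
	loc, err := res.Location()
	if err != nil {
		panic(err)
	}
	// The relative "/foo" is resolved against the request URL.
	fmt.Println(loc) // http://bar.com/foo
}
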
-
-// ReadResponse reads and returns an HTTP response from r. The
-// req parameter specifies the Request that corresponds to
-// this Response. Clients must call resp.Body.Close when finished
-// reading resp.Body. After that call, clients can inspect
-// resp.Trailer to find key/value pairs included in the response
-// trailer.
-func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err error) {
-
- tp := textproto.NewReader(r)
- resp = new(Response)
-
- resp.Request = req
-
- // Parse the first line of the response.
- line, err := tp.ReadLine()
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return nil, err
- }
- f := strings.SplitN(line, " ", 3)
- if len(f) < 2 {
- return nil, &badStringError{"malformed HTTP response", line}
- }
- reasonPhrase := ""
- if len(f) > 2 {
- reasonPhrase = f[2]
- }
- resp.Status = f[1] + " " + reasonPhrase
- resp.StatusCode, err = strconv.Atoi(f[1])
- if err != nil {
- return nil, &badStringError{"malformed HTTP status code", f[1]}
- }
-
- resp.Proto = f[0]
- var ok bool
- if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
- return nil, &badStringError{"malformed HTTP version", resp.Proto}
- }
-
- // Parse the response headers.
- mimeHeader, err := tp.ReadMIMEHeader()
- if err != nil {
- return nil, err
- }
- resp.Header = Header(mimeHeader)
-
- fixPragmaCacheControl(resp.Header)
-
- err = readTransfer(resp, r)
- if err != nil {
- return nil, err
- }
-
- return resp, nil
-}
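
In practice the contract in the doc comment looks roughly like this: write a request over a connection, parse the reply with ReadResponse, and close the Body when done. This is only a sketch; the host example.com is a placeholder.

package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80") // placeholder host
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	if err := req.Write(conn); err != nil {
		panic(err)
	}

	// The req argument ties the parsed response back to its request.
	resp, err := http.ReadResponse(bufio.NewReader(conn), req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close() // required once reading is finished

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, len(body))
}
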
-
-// RFC2616: Should treat
-// Pragma: no-cache
-// like
-// Cache-Control: no-cache
-func fixPragmaCacheControl(header Header) {
- if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" {
- if _, presentcc := header["Cache-Control"]; !presentcc {
- header["Cache-Control"] = []string{"no-cache"}
- }
- }
-}
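
fixPragmaCacheControl is unexported, so its effect only shows up through ReadResponse; below is a sketch of that observable behaviour, using an invented raw response.

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 200 OK\r\n" +
		"Pragma: no-cache\r\n" +
		"Content-Length: 0\r\n" +
		"\r\n"
	resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), &http.Request{Method: "GET"})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Since no Cache-Control header was present, the reader added one.
	fmt.Println(resp.Header.Get("Cache-Control")) // no-cache
}
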
-
-// ProtoAtLeast returns whether the HTTP protocol used
-// in the response is at least major.minor.
-func (r *Response) ProtoAtLeast(major, minor int) bool {
- return r.ProtoMajor > major ||
- r.ProtoMajor == major && r.ProtoMinor >= minor
-}
-
-// Write writes the response (header, body and trailer) in wire format. This method
-// consults the following fields of the response:
-//
-// StatusCode
-// ProtoMajor
-// ProtoMinor
-// Request.Method
-// TransferEncoding
-// Trailer
-// Body
-// ContentLength
-// Header, values for non-canonical keys will have unpredictable behavior
-//
-func (r *Response) Write(w io.Writer) error {
-
- // Status line
- text := r.Status
- if text == "" {
- var ok bool
- text, ok = statusText[r.StatusCode]
- if !ok {
- text = "status code " + strconv.Itoa(r.StatusCode)
- }
- }
- protoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor)
- statusCode := strconv.Itoa(r.StatusCode) + " "
- if strings.HasPrefix(text, statusCode) {
- text = text[len(statusCode):]
- }
- io.WriteString(w, "HTTP/"+protoMajor+"."+protoMinor+" "+statusCode+text+"\r\n")
-
- // Process Body,ContentLength,Close,Trailer
- tw, err := newTransferWriter(r)
- if err != nil {
- return err
- }
- err = tw.WriteHeader(w)
- if err != nil {
- return err
- }
-
- // Rest of header
- err = r.Header.WriteSubset(w, respExcludeHeader)
- if err != nil {
- return err
- }
-
- // End-of-header
- io.WriteString(w, "\r\n")
-
- // Write body and trailer
- err = tw.WriteBody(w)
- if err != nil {
- return err
- }
-
- // Success
- return nil
-}
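
A sketch of Write producing the wire format from the fields listed above; the body text is arbitrary, and the expected output matches what responsewrite_test.go below checks for similar inputs.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	res := &http.Response{
		StatusCode:    200,
		ProtoMajor:    1,
		ProtoMinor:    1,
		Request:       &http.Request{Method: "GET"},
		Header:        http.Header{},
		Body:          ioutil.NopCloser(strings.NewReader("abcdef")),
		ContentLength: 6,
	}
	var buf bytes.Buffer
	if err := res.Write(&buf); err != nil {
		panic(err)
	}
	// Expected: "HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nabcdef"
	fmt.Printf("%q\n", buf.String())
}
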
diff --git a/gcc-4.8.1/libgo/go/net/http/response_test.go b/gcc-4.8.1/libgo/go/net/http/response_test.go
deleted file mode 100644
index a00a4ae0a..000000000
--- a/gcc-4.8.1/libgo/go/net/http/response_test.go
+++ /dev/null
@@ -1,526 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "compress/gzip"
- "crypto/rand"
- "fmt"
- "io"
- "io/ioutil"
- "net/url"
- "reflect"
- "strings"
- "testing"
-)
-
-type respTest struct {
- Raw string
- Resp Response
- Body string
-}
-
-func dummyReq(method string) *Request {
- return &Request{Method: method}
-}
-
-var respTests = []respTest{
- // Unchunked response without Content-Length.
- {
- "HTTP/1.0 200 OK\r\n" +
- "Connection: close\r\n" +
- "\r\n" +
- "Body here\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{
- "Connection": {"close"}, // TODO(rsc): Delete?
- },
- Close: true,
- ContentLength: -1,
- },
-
- "Body here\n",
- },
-
- // Unchunked HTTP/1.1 response without Content-Length or
- // Connection headers.
- {
- "HTTP/1.1 200 OK\r\n" +
- "\r\n" +
- "Body here\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Request: dummyReq("GET"),
- Close: true,
- ContentLength: -1,
- },
-
- "Body here\n",
- },
-
- // Unchunked HTTP/1.1 204 response without Content-Length.
- {
- "HTTP/1.1 204 No Content\r\n" +
- "\r\n" +
- "Body should not be read!\n",
-
- Response{
- Status: "204 No Content",
- StatusCode: 204,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: Header{},
- Request: dummyReq("GET"),
- Close: false,
- ContentLength: 0,
- },
-
- "",
- },
-
- // Unchunked response with Content-Length.
- {
- "HTTP/1.0 200 OK\r\n" +
- "Content-Length: 10\r\n" +
- "Connection: close\r\n" +
- "\r\n" +
- "Body here\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{
- "Connection": {"close"}, // TODO(rsc): Delete?
- "Content-Length": {"10"}, // TODO(rsc): Delete?
- },
- Close: true,
- ContentLength: 10,
- },
-
- "Body here\n",
- },
-
- // Chunked response without Content-Length.
- {
- "HTTP/1.1 200 OK\r\n" +
- "Transfer-Encoding: chunked\r\n" +
- "\r\n" +
- "0a\r\n" +
- "Body here\n\r\n" +
- "09\r\n" +
- "continued\r\n" +
- "0\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: false,
- ContentLength: -1,
- TransferEncoding: []string{"chunked"},
- },
-
- "Body here\ncontinued",
- },
-
- // Chunked response with Content-Length.
- {
- "HTTP/1.1 200 OK\r\n" +
- "Transfer-Encoding: chunked\r\n" +
- "Content-Length: 10\r\n" +
- "\r\n" +
- "0a\r\n" +
- "Body here\n" +
- "0\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: false,
- ContentLength: -1, // TODO(rsc): Fix?
- TransferEncoding: []string{"chunked"},
- },
-
- "Body here\n",
- },
-
- // Chunked response in response to a HEAD request
- {
- "HTTP/1.1 200 OK\r\n" +
- "Transfer-Encoding: chunked\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("HEAD"),
- Header: Header{},
- TransferEncoding: []string{"chunked"},
- Close: false,
- ContentLength: -1,
- },
-
- "",
- },
-
- // Content-Length in response to a HEAD request
- {
- "HTTP/1.0 200 OK\r\n" +
- "Content-Length: 256\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("HEAD"),
- Header: Header{"Content-Length": {"256"}},
- TransferEncoding: nil,
- Close: true,
- ContentLength: 256,
- },
-
- "",
- },
-
- // Content-Length in response to a HEAD request with HTTP/1.1
- {
- "HTTP/1.1 200 OK\r\n" +
- "Content-Length: 256\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("HEAD"),
- Header: Header{"Content-Length": {"256"}},
- TransferEncoding: nil,
- Close: false,
- ContentLength: 256,
- },
-
- "",
- },
-
- // No Content-Length or Chunked in response to a HEAD request
- {
- "HTTP/1.0 200 OK\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("HEAD"),
- Header: Header{},
- TransferEncoding: nil,
- Close: true,
- ContentLength: -1,
- },
-
- "",
- },
-
- // explicit Content-Length of 0.
- {
- "HTTP/1.1 200 OK\r\n" +
- "Content-Length: 0\r\n" +
- "\r\n",
-
- Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{
- "Content-Length": {"0"},
- },
- Close: false,
- ContentLength: 0,
- },
-
- "",
- },
-
- // Status line without a Reason-Phrase, but trailing space.
- // (permitted by RFC 2616)
- {
- "HTTP/1.0 303 \r\n\r\n",
- Response{
- Status: "303 ",
- StatusCode: 303,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: true,
- ContentLength: -1,
- },
-
- "",
- },
-
- // Status line without a Reason-Phrase, and no trailing space.
- // (not permitted by RFC 2616, but we'll accept it anyway)
- {
- "HTTP/1.0 303\r\n\r\n",
- Response{
- Status: "303 ",
- StatusCode: 303,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Close: true,
- ContentLength: -1,
- },
-
- "",
- },
-}
-
-func TestReadResponse(t *testing.T) {
- for i := range respTests {
- tt := &respTests[i]
- var braw bytes.Buffer
- braw.WriteString(tt.Raw)
- resp, err := ReadResponse(bufio.NewReader(&braw), tt.Resp.Request)
- if err != nil {
- t.Errorf("#%d: %s", i, err)
- continue
- }
- rbody := resp.Body
- resp.Body = nil
- diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp)
- var bout bytes.Buffer
- if rbody != nil {
- io.Copy(&bout, rbody)
- rbody.Close()
- }
- body := bout.String()
- if body != tt.Body {
- t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
- }
- }
-}
-
-var readResponseCloseInMiddleTests = []struct {
- chunked, compressed bool
-}{
- {false, false},
- {true, false},
- {true, true},
-}
-
-// TestReadResponseCloseInMiddle tests that closing a body after
-// reading only part of its contents advances the read to the end of
-// the response, leaving the reader positioned at the next message.
-func TestReadResponseCloseInMiddle(t *testing.T) {
- for _, test := range readResponseCloseInMiddleTests {
- fatalf := func(format string, args ...interface{}) {
- args = append([]interface{}{test.chunked, test.compressed}, args...)
- t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...)
- }
- checkErr := func(err error, msg string) {
- if err == nil {
- return
- }
- fatalf(msg+": %v", err)
- }
- var buf bytes.Buffer
- buf.WriteString("HTTP/1.1 200 OK\r\n")
- if test.chunked {
- buf.WriteString("Transfer-Encoding: chunked\r\n")
- } else {
- buf.WriteString("Content-Length: 1000000\r\n")
- }
- var wr io.Writer = &buf
- if test.chunked {
- wr = newChunkedWriter(wr)
- }
- if test.compressed {
- buf.WriteString("Content-Encoding: gzip\r\n")
- wr = gzip.NewWriter(wr)
- }
- buf.WriteString("\r\n")
-
- chunk := bytes.Repeat([]byte{'x'}, 1000)
- for i := 0; i < 1000; i++ {
- if test.compressed {
- // Otherwise this compresses too well.
- _, err := io.ReadFull(rand.Reader, chunk)
- checkErr(err, "rand.Reader ReadFull")
- }
- wr.Write(chunk)
- }
- if test.compressed {
- err := wr.(*gzip.Writer).Close()
- checkErr(err, "compressor close")
- }
- if test.chunked {
- buf.WriteString("0\r\n\r\n")
- }
- buf.WriteString("Next Request Here")
-
- bufr := bufio.NewReader(&buf)
- resp, err := ReadResponse(bufr, dummyReq("GET"))
- checkErr(err, "ReadResponse")
- expectedLength := int64(-1)
- if !test.chunked {
- expectedLength = 1000000
- }
- if resp.ContentLength != expectedLength {
- fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength)
- }
- if resp.Body == nil {
- fatalf("nil body")
- }
- if test.compressed {
- gzReader, err := gzip.NewReader(resp.Body)
- checkErr(err, "gzip.NewReader")
- resp.Body = &readFirstCloseBoth{gzReader, resp.Body}
- }
-
- rbuf := make([]byte, 2500)
- n, err := io.ReadFull(resp.Body, rbuf)
- checkErr(err, "2500 byte ReadFull")
- if n != 2500 {
- fatalf("ReadFull only read %d bytes", n)
- }
- if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) {
- fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf))
- }
- resp.Body.Close()
-
- rest, err := ioutil.ReadAll(bufr)
- checkErr(err, "ReadAll on remainder")
- if e, g := "Next Request Here", string(rest); e != g {
- fatalf("remainder = %q, expected %q", g, e)
- }
- }
-}
-
-func diff(t *testing.T, prefix string, have, want interface{}) {
- hv := reflect.ValueOf(have).Elem()
- wv := reflect.ValueOf(want).Elem()
- if hv.Type() != wv.Type() {
- t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type())
- }
- for i := 0; i < hv.NumField(); i++ {
- hf := hv.Field(i).Interface()
- wf := wv.Field(i).Interface()
- if !reflect.DeepEqual(hf, wf) {
- t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf)
- }
- }
-}
-
-type responseLocationTest struct {
- location string // Response's Location header or ""
- requrl string // Response.Request.URL or ""
- want string
- wantErr error
-}
-
-var responseLocationTests = []responseLocationTest{
- {"/foo", "http://bar.com/baz", "http://bar.com/foo", nil},
- {"http://foo.com/", "http://bar.com/baz", "http://foo.com/", nil},
- {"", "http://bar.com/baz", "", ErrNoLocation},
-}
-
-func TestLocationResponse(t *testing.T) {
- for i, tt := range responseLocationTests {
- res := new(Response)
- res.Header = make(Header)
- res.Header.Set("Location", tt.location)
- if tt.requrl != "" {
- res.Request = &Request{}
- var err error
- res.Request.URL, err = url.Parse(tt.requrl)
- if err != nil {
- t.Fatalf("bad test URL %q: %v", tt.requrl, err)
- }
- }
-
- got, err := res.Location()
- if tt.wantErr != nil {
- if err == nil {
- t.Errorf("%d. err=nil; want %q", i, tt.wantErr)
- continue
- }
- if g, e := err.Error(), tt.wantErr.Error(); g != e {
- t.Errorf("%d. err=%q; want %q", i, g, e)
- continue
- }
- continue
- }
- if err != nil {
- t.Errorf("%d. err=%q", i, err)
- continue
- }
- if g, e := got.String(), tt.want; g != e {
- t.Errorf("%d. Location=%q; want %q", i, g, e)
- }
- }
-}
-
-func TestResponseStatusStutter(t *testing.T) {
- r := &Response{
- Status: "123 some status",
- StatusCode: 123,
- ProtoMajor: 1,
- ProtoMinor: 3,
- }
- var buf bytes.Buffer
- r.Write(&buf)
- if strings.Contains(buf.String(), "123 123") {
- t.Errorf("stutter in status: %s", buf.String())
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/responsewrite_test.go b/gcc-4.8.1/libgo/go/net/http/responsewrite_test.go
deleted file mode 100644
index 5c10e2161..000000000
--- a/gcc-4.8.1/libgo/go/net/http/responsewrite_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "io/ioutil"
- "testing"
-)
-
-type respWriteTest struct {
- Resp Response
- Raw string
-}
-
-func TestResponseWrite(t *testing.T) {
- respWriteTests := []respWriteTest{
- // HTTP/1.0, identity coding; no trailer
- {
- Response{
- StatusCode: 503,
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
- ContentLength: 6,
- },
-
- "HTTP/1.0 503 Service Unavailable\r\n" +
- "Content-Length: 6\r\n\r\n" +
- "abcdef",
- },
- // Unchunked response without Content-Length.
- {
- Response{
- StatusCode: 200,
- ProtoMajor: 1,
- ProtoMinor: 0,
- Request: dummyReq("GET"),
- Header: Header{},
- Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
- ContentLength: -1,
- },
- "HTTP/1.0 200 OK\r\n" +
- "\r\n" +
- "abcdef",
- },
- // HTTP/1.1, chunked coding; empty trailer; close
- {
- Response{
- StatusCode: 200,
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{},
- Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
- ContentLength: 6,
- TransferEncoding: []string{"chunked"},
- Close: true,
- },
-
- "HTTP/1.1 200 OK\r\n" +
- "Connection: close\r\n" +
- "Transfer-Encoding: chunked\r\n\r\n" +
- "6\r\nabcdef\r\n0\r\n\r\n",
- },
-
- // Header value with a newline character (Issue 914).
- // Also tests removal of leading and trailing whitespace.
- {
- Response{
- StatusCode: 204,
- ProtoMajor: 1,
- ProtoMinor: 1,
- Request: dummyReq("GET"),
- Header: Header{
- "Foo": []string{" Bar\nBaz "},
- },
- Body: nil,
- ContentLength: 0,
- TransferEncoding: []string{"chunked"},
- Close: true,
- },
-
- "HTTP/1.1 204 No Content\r\n" +
- "Connection: close\r\n" +
- "Foo: Bar Baz\r\n" +
- "\r\n",
- },
- }
-
- for i := range respWriteTests {
- tt := &respWriteTests[i]
- var braw bytes.Buffer
- err := tt.Resp.Write(&braw)
- if err != nil {
- t.Errorf("error writing #%d: %s", i, err)
- continue
- }
- sraw := braw.String()
- if sraw != tt.Raw {
- t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw)
- continue
- }
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/serve_test.go b/gcc-4.8.1/libgo/go/net/http/serve_test.go
deleted file mode 100644
index 886ed4e8f..000000000
--- a/gcc-4.8.1/libgo/go/net/http/serve_test.go
+++ /dev/null
@@ -1,1526 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// End-to-end serving tests
-
-package http_test
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- . "net/http"
- "net/http/httptest"
- "net/http/httputil"
- "net/url"
- "os"
- "os/exec"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "syscall"
- "testing"
- "time"
-)
-
-type dummyAddr string
-type oneConnListener struct {
- conn net.Conn
-}
-
-func (l *oneConnListener) Accept() (c net.Conn, err error) {
- c = l.conn
- if c == nil {
- err = io.EOF
- return
- }
- err = nil
- l.conn = nil
- return
-}
-
-func (l *oneConnListener) Close() error {
- return nil
-}
-
-func (l *oneConnListener) Addr() net.Addr {
- return dummyAddr("test-address")
-}
-
-func (a dummyAddr) Network() string {
- return string(a)
-}
-
-func (a dummyAddr) String() string {
- return string(a)
-}
-
-type testConn struct {
- readBuf bytes.Buffer
- writeBuf bytes.Buffer
- closec chan bool // if non-nil, send value to it on close
-}
-
-func (c *testConn) Read(b []byte) (int, error) {
- return c.readBuf.Read(b)
-}
-
-func (c *testConn) Write(b []byte) (int, error) {
- return c.writeBuf.Write(b)
-}
-
-func (c *testConn) Close() error {
- select {
- case c.closec <- true:
- default:
- }
- return nil
-}
-
-func (c *testConn) LocalAddr() net.Addr {
- return dummyAddr("local-addr")
-}
-
-func (c *testConn) RemoteAddr() net.Addr {
- return dummyAddr("remote-addr")
-}
-
-func (c *testConn) SetDeadline(t time.Time) error {
- return nil
-}
-
-func (c *testConn) SetReadDeadline(t time.Time) error {
- return nil
-}
-
-func (c *testConn) SetWriteDeadline(t time.Time) error {
- return nil
-}
-
-func TestConsumingBodyOnNextConn(t *testing.T) {
- conn := new(testConn)
- for i := 0; i < 2; i++ {
- conn.readBuf.Write([]byte(
- "POST / HTTP/1.1\r\n" +
- "Host: test\r\n" +
- "Content-Length: 11\r\n" +
- "\r\n" +
- "foo=1&bar=1"))
- }
-
- reqNum := 0
- ch := make(chan *Request)
- servech := make(chan error)
- listener := &oneConnListener{conn}
- handler := func(res ResponseWriter, req *Request) {
- reqNum++
- ch <- req
- }
-
- go func() {
- servech <- Serve(listener, HandlerFunc(handler))
- }()
-
- var req *Request
- req = <-ch
- if req == nil {
- t.Fatal("Got nil first request.")
- }
- if req.Method != "POST" {
- t.Errorf("For request #1's method, got %q; expected %q",
- req.Method, "POST")
- }
-
- req = <-ch
- if req == nil {
-		t.Fatal("Got nil second request.")
- }
- if req.Method != "POST" {
- t.Errorf("For request #2's method, got %q; expected %q",
- req.Method, "POST")
- }
-
- if serveerr := <-servech; serveerr != io.EOF {
- t.Errorf("Serve returned %q; expected EOF", serveerr)
- }
-}
-
-type stringHandler string
-
-func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) {
- w.Header().Set("Result", string(s))
-}
-
-var handlers = []struct {
- pattern string
- msg string
-}{
- {"/", "Default"},
- {"/someDir/", "someDir"},
- {"someHost.com/someDir/", "someHost.com/someDir"},
-}
-
-var vtests = []struct {
- url string
- expected string
-}{
- {"http://localhost/someDir/apage", "someDir"},
- {"http://localhost/otherDir/apage", "Default"},
- {"http://someHost.com/someDir/apage", "someHost.com/someDir"},
- {"http://otherHost.com/someDir/apage", "someDir"},
- {"http://otherHost.com/aDir/apage", "Default"},
- // redirections for trees
- {"http://localhost/someDir", "/someDir/"},
- {"http://someHost.com/someDir", "/someDir/"},
-}
-
-func TestHostHandlers(t *testing.T) {
- mux := NewServeMux()
- for _, h := range handlers {
- mux.Handle(h.pattern, stringHandler(h.msg))
- }
- ts := httptest.NewServer(mux)
- defer ts.Close()
-
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- defer conn.Close()
- cc := httputil.NewClientConn(conn, nil)
- for _, vt := range vtests {
- var r *Response
- var req Request
- if req.URL, err = url.Parse(vt.url); err != nil {
- t.Errorf("cannot parse url: %v", err)
- continue
- }
- if err := cc.Write(&req); err != nil {
- t.Errorf("writing request: %v", err)
- continue
- }
- r, err := cc.Read(&req)
- if err != nil {
- t.Errorf("reading response: %v", err)
- continue
- }
- switch r.StatusCode {
- case StatusOK:
- s := r.Header.Get("Result")
- if s != vt.expected {
- t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
- }
- case StatusMovedPermanently:
- s := r.Header.Get("Location")
- if s != vt.expected {
- t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
- }
- default:
- t.Errorf("Get(%q) unhandled status code %d", vt.url, r.StatusCode)
- }
- }
-}
-
-// Tests for http://code.google.com/p/go/issues/detail?id=900
-func TestMuxRedirectLeadingSlashes(t *testing.T) {
- paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"}
- for _, path := range paths {
- req, err := ReadRequest(bufio.NewReader(bytes.NewBufferString("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n")))
- if err != nil {
- t.Errorf("%s", err)
- }
- mux := NewServeMux()
- resp := httptest.NewRecorder()
-
- mux.ServeHTTP(resp, req)
-
- if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected {
- t.Errorf("Expected Location header set to %q; got %q", expected, loc)
- return
- }
-
- if code, expected := resp.Code, StatusMovedPermanently; code != expected {
- t.Errorf("Expected response code of StatusMovedPermanently; got %d", code)
- return
- }
- }
-}
-
-func TestServerTimeouts(t *testing.T) {
- // TODO(bradfitz): convert this to use httptest.Server
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("listen error: %v", err)
- }
- addr, _ := l.Addr().(*net.TCPAddr)
-
- reqNum := 0
- handler := HandlerFunc(func(res ResponseWriter, req *Request) {
- reqNum++
- fmt.Fprintf(res, "req=%d", reqNum)
- })
-
- server := &Server{Handler: handler, ReadTimeout: 250 * time.Millisecond, WriteTimeout: 250 * time.Millisecond}
- go server.Serve(l)
-
- url := fmt.Sprintf("http://%s/", addr)
-
- // Hit the HTTP server successfully.
- tr := &Transport{DisableKeepAlives: true} // they interfere with this test
- c := &Client{Transport: tr}
- r, err := c.Get(url)
- if err != nil {
- t.Fatalf("http Get #1: %v", err)
- }
- got, _ := ioutil.ReadAll(r.Body)
- expected := "req=1"
- if string(got) != expected {
- t.Errorf("Unexpected response for request #1; got %q; expected %q",
- string(got), expected)
- }
-
- // Slow client that should timeout.
- t1 := time.Now()
- conn, err := net.Dial("tcp", addr.String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- buf := make([]byte, 1)
- n, err := conn.Read(buf)
- latency := time.Now().Sub(t1)
- if n != 0 || err != io.EOF {
- t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
- }
- if latency < 200*time.Millisecond /* fudge from 250 ms above */ {
- t.Errorf("got EOF after %s, want >= %s", latency, 200*time.Millisecond)
- }
-
- // Hit the HTTP server successfully again, verifying that the
- // previous slow connection didn't run our handler. (that we
- // get "req=2", not "req=3")
- r, err = Get(url)
- if err != nil {
- t.Fatalf("http Get #2: %v", err)
- }
- got, _ = ioutil.ReadAll(r.Body)
- expected = "req=2"
- if string(got) != expected {
- t.Errorf("Get #2 got %q, want %q", string(got), expected)
- }
-
- l.Close()
-}
-
-// TestIdentityResponse verifies that a handler can produce an identity
-// (non-chunked) response by setting an explicit Content-Length.
-func TestIdentityResponse(t *testing.T) {
- handler := HandlerFunc(func(rw ResponseWriter, req *Request) {
- rw.Header().Set("Content-Length", "3")
- rw.Header().Set("Transfer-Encoding", req.FormValue("te"))
- switch {
- case req.FormValue("overwrite") == "1":
- _, err := rw.Write([]byte("foo TOO LONG"))
- if err != ErrContentLength {
- t.Errorf("expected ErrContentLength; got %v", err)
- }
- case req.FormValue("underwrite") == "1":
- rw.Header().Set("Content-Length", "500")
- rw.Write([]byte("too short"))
- default:
- rw.Write([]byte("foo"))
- }
- })
-
- ts := httptest.NewServer(handler)
- defer ts.Close()
-
- // Note: this relies on the assumption (which is true) that
- // Get sends HTTP/1.1 or greater requests. Otherwise the
- // server wouldn't have the choice to send back chunked
- // responses.
- for _, te := range []string{"", "identity"} {
- url := ts.URL + "/?te=" + te
- res, err := Get(url)
- if err != nil {
- t.Fatalf("error with Get of %s: %v", url, err)
- }
- if cl, expected := res.ContentLength, int64(3); cl != expected {
- t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl)
- }
- if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected {
- t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl)
- }
- if tl, expected := len(res.TransferEncoding), 0; tl != expected {
- t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)",
- url, expected, tl, res.TransferEncoding)
- }
- res.Body.Close()
- }
-
- // Verify that ErrContentLength is returned
- url := ts.URL + "/?overwrite=1"
- _, err := Get(url)
- if err != nil {
- t.Fatalf("error with Get of %s: %v", url, err)
- }
- // Verify that the connection is closed when the declared Content-Length
- // is larger than what the handler wrote.
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("error dialing: %v", err)
- }
- _, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n"))
- if err != nil {
- t.Fatalf("error writing: %v", err)
- }
-
- // The ReadAll will hang for a failing test, so use a Timer to
- // fail explicitly.
- goTimeout(t, 2*time.Second, func() {
- got, _ := ioutil.ReadAll(conn)
- expectedSuffix := "\r\n\r\ntoo short"
- if !strings.HasSuffix(string(got), expectedSuffix) {
- t.Errorf("Expected output to end with %q; got response body %q",
- expectedSuffix, string(got))
- }
- })
-}
-
-func testTCPConnectionCloses(t *testing.T, req string, h Handler) {
- s := httptest.NewServer(h)
- defer s.Close()
-
- conn, err := net.Dial("tcp", s.Listener.Addr().String())
- if err != nil {
- t.Fatal("dial error:", err)
- }
- defer conn.Close()
-
- _, err = fmt.Fprint(conn, req)
- if err != nil {
- t.Fatal("print error:", err)
- }
-
- r := bufio.NewReader(conn)
- res, err := ReadResponse(r, &Request{Method: "GET"})
- if err != nil {
- t.Fatal("ReadResponse error:", err)
- }
-
- didReadAll := make(chan bool, 1)
- go func() {
- select {
- case <-time.After(5 * time.Second):
- t.Error("body not closed after 5s")
- return
- case <-didReadAll:
- }
- }()
-
- _, err = ioutil.ReadAll(r)
- if err != nil {
- t.Fatal("read error:", err)
- }
- didReadAll <- true
-
- if !res.Close {
- t.Errorf("Response.Close = false; want true")
- }
-}
-
-// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive.
-func TestServeHTTP10Close(t *testing.T) {
- testTCPConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
- ServeFile(w, r, "testdata/file")
- }))
-}
-
-// TestClientCanClose verifies that clients can also force a connection to close.
-func TestClientCanClose(t *testing.T) {
- testTCPConnectionCloses(t, "GET / HTTP/1.1\r\nConnection: close\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
- // Nothing.
- }))
-}
-
-// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close,
-// even for HTTP/1.1 requests.
-func TestHandlersCanSetConnectionClose11(t *testing.T) {
- testTCPConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Connection", "close")
- }))
-}
-
-func TestHandlersCanSetConnectionClose10(t *testing.T) {
- testTCPConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Connection", "close")
- }))
-}
-
-func TestSetsRemoteAddr(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- fmt.Fprintf(w, "%s", r.RemoteAddr)
- }))
- defer ts.Close()
-
- res, err := Get(ts.URL)
- if err != nil {
- t.Fatalf("Get error: %v", err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("ReadAll error: %v", err)
- }
- ip := string(body)
- if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") {
- t.Fatalf("Expected local addr; got %q", ip)
- }
-}
-
-func TestChunkedResponseHeaders(t *testing.T) {
- log.SetOutput(ioutil.Discard) // is noisy otherwise
- defer log.SetOutput(os.Stderr)
-
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Length", "intentional gibberish") // we check that this is deleted
- w.(Flusher).Flush()
- fmt.Fprintf(w, "I am a chunked response.")
- }))
- defer ts.Close()
-
- res, err := Get(ts.URL)
- if err != nil {
- t.Fatalf("Get error: %v", err)
- }
- if g, e := res.ContentLength, int64(-1); g != e {
- t.Errorf("expected ContentLength of %d; got %d", e, g)
- }
- if g, e := res.TransferEncoding, []string{"chunked"}; !reflect.DeepEqual(g, e) {
- t.Errorf("expected TransferEncoding of %v; got %v", e, g)
- }
- if _, haveCL := res.Header["Content-Length"]; haveCL {
- t.Errorf("Unexpected Content-Length")
- }
-}
-
-// Test304Responses verifies that 304s don't declare that they're
-// chunking in their response headers and aren't allowed to produce
-// output.
-func Test304Responses(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.WriteHeader(StatusNotModified)
- _, err := w.Write([]byte("illegal body"))
- if err != ErrBodyNotAllowed {
- t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err)
- }
- }))
- defer ts.Close()
- res, err := Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if len(res.TransferEncoding) > 0 {
- t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Error(err)
- }
- if len(body) > 0 {
- t.Errorf("got unexpected body %q", string(body))
- }
-}
-
-// TestHeadResponses verifies that responses to HEAD requests don't
-// declare that they're chunking in their response headers, aren't
-// allowed to produce output, and don't set a Content-Type since
-// the real type of the body data cannot be inferred.
-func TestHeadResponses(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- _, err := w.Write([]byte("Ignored body"))
- if err != ErrBodyNotAllowed {
- t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err)
- }
-
- // Also exercise the ReaderFrom path
- _, err = io.Copy(w, strings.NewReader("Ignored body"))
- if err != ErrBodyNotAllowed {
- t.Errorf("on Copy, expected ErrBodyNotAllowed, got %v", err)
- }
- }))
- defer ts.Close()
- res, err := Head(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if len(res.TransferEncoding) > 0 {
- t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
- }
- ct := res.Header.Get("Content-Type")
- if ct != "" {
- t.Errorf("expected no Content-Type; got %s", ct)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Error(err)
- }
- if len(body) > 0 {
- t.Errorf("got unexpected body %q", string(body))
- }
-}
-
-func TestTLSHandshakeTimeout(t *testing.T) {
- ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
- ts.Config.ReadTimeout = 250 * time.Millisecond
- ts.StartTLS()
- defer ts.Close()
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer conn.Close()
- goTimeout(t, 10*time.Second, func() {
- var buf [1]byte
- n, err := conn.Read(buf[:])
- if err == nil || n != 0 {
- t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
- }
- })
-}
-
-func TestTLSServer(t *testing.T) {
- ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.TLS != nil {
- w.Header().Set("X-TLS-Set", "true")
- if r.TLS.HandshakeComplete {
- w.Header().Set("X-TLS-HandshakeComplete", "true")
- }
- }
- }))
- defer ts.Close()
-
- // Connect an idle TCP connection to this server before we run
- // our real tests. This idle connection used to block forever
- // in the TLS handshake, preventing future connections from
- // being accepted. It may prevent future accidental blocking
- // in newConn.
- idleConn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer idleConn.Close()
- goTimeout(t, 10*time.Second, func() {
- if !strings.HasPrefix(ts.URL, "https://") {
- t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
- return
- }
- noVerifyTransport := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- }
- client := &Client{Transport: noVerifyTransport}
- res, err := client.Get(ts.URL)
- if err != nil {
- t.Error(err)
- return
- }
- if res == nil {
- t.Errorf("got nil Response")
- return
- }
- defer res.Body.Close()
- if res.Header.Get("X-TLS-Set") != "true" {
- t.Errorf("expected X-TLS-Set response header")
- return
- }
- if res.Header.Get("X-TLS-HandshakeComplete") != "true" {
- t.Errorf("expected X-TLS-HandshakeComplete header")
- }
- })
-}
-
-type serverExpectTest struct {
- contentLength int // of request body
- expectation string // e.g. "100-continue"
- readBody bool // whether handler should read the body (if false, sends StatusUnauthorized)
- expectedResponse string // expected substring in first line of http response
-}
-
-var serverExpectTests = []serverExpectTest{
- // Normal 100-continues, case-insensitive.
- {100, "100-continue", true, "100 Continue"},
- {100, "100-cOntInUE", true, "100 Continue"},
-
- // No 100-continue.
- {100, "", true, "200 OK"},
-
- // 100-continue but requesting client to deny us,
- // so it never reads the body.
- {100, "100-continue", false, "401 Unauthorized"},
- // Likewise without 100-continue:
- {100, "", false, "401 Unauthorized"},
-
- // Non-standard expectations are failures
- {0, "a-pony", false, "417 Expectation Failed"},
-
- // Expect-100 requested but no body
- {0, "100-continue", true, "400 Bad Request"},
-}
-
-// Tests that the server responds to the "Expect" request header
-// correctly.
-func TestServerExpect(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
-		// Note: we avoid r.FormValue("readbody") because for POST
-		// requests that would read from r.Body, which we only
-		// conditionally want to do.
- if strings.Contains(r.URL.RawQuery, "readbody=true") {
- ioutil.ReadAll(r.Body)
- w.Write([]byte("Hi"))
- } else {
- w.WriteHeader(StatusUnauthorized)
- }
- }))
- defer ts.Close()
-
- runTest := func(test serverExpectTest) {
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer conn.Close()
-
- // Only send the body immediately if we're acting like an HTTP client
- // that doesn't send 100-continue expectations.
- writeBody := test.contentLength > 0 && strings.ToLower(test.expectation) != "100-continue"
-
- go func() {
- _, err := fmt.Fprintf(conn, "POST /?readbody=%v HTTP/1.1\r\n"+
- "Connection: close\r\n"+
- "Content-Length: %d\r\n"+
- "Expect: %s\r\nHost: foo\r\n\r\n",
- test.readBody, test.contentLength, test.expectation)
- if err != nil {
- t.Errorf("On test %#v, error writing request headers: %v", test, err)
- return
- }
- if writeBody {
- body := strings.Repeat("A", test.contentLength)
- _, err = fmt.Fprint(conn, body)
- if err != nil {
- if !test.readBody {
- // Server likely already hung up on us.
- // See larger comment below.
- t.Logf("On test %#v, acceptable error writing request body: %v", test, err)
- return
- }
- t.Errorf("On test %#v, error writing request body: %v", test, err)
- }
- }
- }()
- bufr := bufio.NewReader(conn)
- line, err := bufr.ReadString('\n')
- if err != nil {
- if writeBody && !test.readBody {
- // This is an acceptable failure due to a possible TCP race:
- // We were still writing data and the server hung up on us. A TCP
- // implementation may send a RST if our request body data was known
- // to be lost, which may trigger our reads to fail.
- // See RFC 1122 page 88.
- t.Logf("On test %#v, acceptable error from ReadString: %v", test, err)
- return
- }
- t.Fatalf("On test %#v, ReadString: %v", test, err)
- }
- if !strings.Contains(line, test.expectedResponse) {
- t.Errorf("On test %#v, got first line = %q; want %q", test, line, test.expectedResponse)
- }
- }
-
- for _, test := range serverExpectTests {
- runTest(test)
- }
-}
-
-// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server
-// should consume client request bodies that a handler didn't read.
-func TestServerUnreadRequestBodyLittle(t *testing.T) {
- conn := new(testConn)
- body := strings.Repeat("x", 100<<10)
- conn.readBuf.Write([]byte(fmt.Sprintf(
- "POST / HTTP/1.1\r\n"+
- "Host: test\r\n"+
- "Content-Length: %d\r\n"+
- "\r\n", len(body))))
- conn.readBuf.Write([]byte(body))
-
- done := make(chan bool)
-
- ls := &oneConnListener{conn}
- go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
- defer close(done)
- if conn.readBuf.Len() < len(body)/2 {
- t.Errorf("on request, read buffer length is %d; expected about 100 KB", conn.readBuf.Len())
- }
- rw.WriteHeader(200)
- rw.(Flusher).Flush()
- if g, e := conn.readBuf.Len(), 0; g != e {
- t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e)
- }
- if c := rw.Header().Get("Connection"); c != "" {
- t.Errorf(`Connection header = %q; want ""`, c)
- }
- }))
- <-done
-}
-
-// Over a ~256KB (maxPostHandlerReadBytes) threshold, the server
-// should ignore client request bodies that a handler didn't read
-// and close the connection.
-func TestServerUnreadRequestBodyLarge(t *testing.T) {
- conn := new(testConn)
- body := strings.Repeat("x", 1<<20)
- conn.readBuf.Write([]byte(fmt.Sprintf(
- "POST / HTTP/1.1\r\n"+
- "Host: test\r\n"+
- "Content-Length: %d\r\n"+
- "\r\n", len(body))))
- conn.readBuf.Write([]byte(body))
- conn.closec = make(chan bool, 1)
-
- ls := &oneConnListener{conn}
- go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
- if conn.readBuf.Len() < len(body)/2 {
- t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
- }
- rw.WriteHeader(200)
- rw.(Flusher).Flush()
- if conn.readBuf.Len() < len(body)/2 {
- t.Errorf("post-WriteHeader, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
- }
- }))
- <-conn.closec
-
- if res := conn.writeBuf.String(); !strings.Contains(res, "Connection: close") {
- t.Errorf("Expected a Connection: close header; got response: %s", res)
- }
-}
-
-func TestTimeoutHandler(t *testing.T) {
- sendHi := make(chan bool, 1)
- writeErrors := make(chan error, 1)
- sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
- <-sendHi
- _, werr := w.Write([]byte("hi"))
- writeErrors <- werr
- })
- timeout := make(chan time.Time, 1) // write to this to force timeouts
- ts := httptest.NewServer(NewTestTimeoutHandler(sayHi, timeout))
- defer ts.Close()
-
- // Succeed without timing out:
- sendHi <- true
- res, err := Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if g, e := res.StatusCode, StatusOK; g != e {
- t.Errorf("got res.StatusCode %d; expected %d", g, e)
- }
- body, _ := ioutil.ReadAll(res.Body)
- if g, e := string(body), "hi"; g != e {
- t.Errorf("got body %q; expected %q", g, e)
- }
- if g := <-writeErrors; g != nil {
- t.Errorf("got unexpected Write error on first request: %v", g)
- }
-
- // Times out:
- timeout <- time.Time{}
- res, err = Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
- t.Errorf("got res.StatusCode %d; expected %d", g, e)
- }
- body, _ = ioutil.ReadAll(res.Body)
- if !strings.Contains(string(body), "<title>Timeout</title>") {
- t.Errorf("expected timeout body; got %q", string(body))
- }
-
- // Now make the previously-timed out handler speak again,
- // which verifies the panic is handled:
- sendHi <- true
- if g, e := <-writeErrors, ErrHandlerTimeout; g != e {
- t.Errorf("expected Write error of %v; got %v", e, g)
- }
-}
-
-// Verifies we don't path.Clean() on the wrong parts in redirects.
-func TestRedirectMunging(t *testing.T) {
- req, _ := NewRequest("GET", "http://example.com/", nil)
-
- resp := httptest.NewRecorder()
- Redirect(resp, req, "/foo?next=http://bar.com/", 302)
- if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e {
- t.Errorf("Location header was %q; want %q", g, e)
- }
-
- resp = httptest.NewRecorder()
- Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302)
- if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e {
- t.Errorf("Location header was %q; want %q", g, e)
- }
-}
-
-// TestZeroLengthPostAndResponse exercises an optimization done by the Transport:
-// when there is no body (either because the method doesn't permit a body, or an
-// explicit Content-Length of zero is present), then the transport can re-use the
-// connection immediately. But when it re-uses the connection, it typically closes
-// the previous request's body, which is not optimal for zero-lengthed bodies,
-// as the client would then see http.ErrBodyReadAfterClose and not 0, io.EOF.
-func TestZeroLengthPostAndResponse(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
- all, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Fatalf("handler ReadAll: %v", err)
- }
- if len(all) != 0 {
- t.Errorf("handler got %d bytes; expected 0", len(all))
- }
- rw.Header().Set("Content-Length", "0")
- }))
- defer ts.Close()
-
- req, err := NewRequest("POST", ts.URL, strings.NewReader(""))
- if err != nil {
- t.Fatal(err)
- }
- req.ContentLength = 0
-
- var resp [5]*Response
- for i := range resp {
- resp[i], err = DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("client post #%d: %v", i, err)
- }
- }
-
- for i := range resp {
- all, err := ioutil.ReadAll(resp[i].Body)
- if err != nil {
- t.Fatalf("req #%d: client ReadAll: %v", i, err)
- }
- if len(all) != 0 {
- t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all))
- }
- }
-}
-
-func TestHandlerPanicNil(t *testing.T) {
- testHandlerPanic(t, false, nil)
-}
-
-func TestHandlerPanic(t *testing.T) {
- testHandlerPanic(t, false, "intentional death for testing")
-}
-
-func TestHandlerPanicWithHijack(t *testing.T) {
- testHandlerPanic(t, true, "intentional death for testing")
-}
-
-func testHandlerPanic(t *testing.T, withHijack bool, panicValue interface{}) {
- // Unlike the other tests that set the log output to ioutil.Discard
- // to quiet the output, this test uses a pipe. The pipe serves three
- // purposes:
- //
- // 1) The log.Print from the http server (generated by the caught
- // panic) will go to the pipe instead of stderr, making the
- // output quiet.
- //
- // 2) We read from the pipe to verify that the handler
- // actually caught the panic and logged something.
- //
- // 3) The blocking Read call prevents this TestHandlerPanic
- // function from exiting before the HTTP server handler
-	// finishes crashing. If this test function exited too
- // early (and its defer log.SetOutput(os.Stderr) ran),
- // then the crash output could spill into the next test.
- pr, pw := io.Pipe()
- log.SetOutput(pw)
- defer log.SetOutput(os.Stderr)
-
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if withHijack {
- rwc, _, err := w.(Hijacker).Hijack()
- if err != nil {
- t.Logf("unexpected error: %v", err)
- }
- defer rwc.Close()
- }
- panic(panicValue)
- }))
- defer ts.Close()
-
- // Do a blocking read on the log output pipe so its logging
- // doesn't bleed into the next test. But wait only 5 seconds
- // for it.
- done := make(chan bool, 1)
- go func() {
- buf := make([]byte, 4<<10)
- _, err := pr.Read(buf)
- pr.Close()
- if err != nil {
- t.Error(err)
- }
- done <- true
- }()
-
- _, err := Get(ts.URL)
- if err == nil {
- t.Logf("expected an error")
- }
-
- if panicValue == nil {
- return
- }
-
- select {
- case <-done:
- return
- case <-time.After(5 * time.Second):
- t.Fatal("expected server handler to log an error")
- }
-}
-
-func TestNoDate(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header()["Date"] = nil
- }))
- defer ts.Close()
- res, err := Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- _, present := res.Header["Date"]
- if present {
- t.Fatalf("Expected no Date header; got %v", res.Header["Date"])
- }
-}
-
-func TestStripPrefix(t *testing.T) {
- h := HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("X-Path", r.URL.Path)
- })
- ts := httptest.NewServer(StripPrefix("/foo", h))
- defer ts.Close()
-
- res, err := Get(ts.URL + "/foo/bar")
- if err != nil {
- t.Fatal(err)
- }
- if g, e := res.Header.Get("X-Path"), "/bar"; g != e {
- t.Errorf("test 1: got %s, want %s", g, e)
- }
-
- res, err = Get(ts.URL + "/bar")
- if err != nil {
- t.Fatal(err)
- }
- if g, e := res.StatusCode, 404; g != e {
- t.Errorf("test 2: got status %v, want %v", g, e)
- }
-}
-
-func TestRequestLimit(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- t.Fatalf("didn't expect to get request in Handler")
- }))
- defer ts.Close()
- req, _ := NewRequest("GET", ts.URL, nil)
- var bytesPerHeader = len("header12345: val12345\r\n")
- for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ {
- req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i))
- }
- res, err := DefaultClient.Do(req)
- if err != nil {
- // Some HTTP clients may fail on this undefined behavior (server replying and
- // closing the connection while the request is still being written), but
- // we do support it (at least currently), so we expect a response below.
- t.Fatalf("Do: %v", err)
- }
- if res.StatusCode != 413 {
- t.Fatalf("expected 413 response status; got: %d %s", res.StatusCode, res.Status)
- }
-}
-
-type neverEnding byte
-
-func (b neverEnding) Read(p []byte) (n int, err error) {
- for i := range p {
- p[i] = byte(b)
- }
- return len(p), nil
-}
-
-type countReader struct {
- r io.Reader
- n *int64
-}
-
-func (cr countReader) Read(p []byte) (n int, err error) {
- n, err = cr.r.Read(p)
- atomic.AddInt64(cr.n, int64(n))
- return
-}
-
-func TestRequestBodyLimit(t *testing.T) {
- const limit = 1 << 20
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- r.Body = MaxBytesReader(w, r.Body, limit)
- n, err := io.Copy(ioutil.Discard, r.Body)
- if err == nil {
- t.Errorf("expected error from io.Copy")
- }
- if n != limit {
- t.Errorf("io.Copy = %d, want %d", n, limit)
- }
- }))
- defer ts.Close()
-
- nWritten := new(int64)
- req, _ := NewRequest("POST", ts.URL, io.LimitReader(countReader{neverEnding('a'), nWritten}, limit*200))
-
- // Send the POST, but don't care it succeeds or not. The
- // remote side is going to reply and then close the TCP
- // connection, and HTTP doesn't really define if that's
- // allowed or not. Some HTTP clients will get the response
- // and some (like ours, currently) will complain that the
- // request write failed, without reading the response.
- //
- // But that's okay, since what we're really testing is that
- // the remote side hung up on us before we wrote too much.
- _, _ = DefaultClient.Do(req)
-
- if atomic.LoadInt64(nWritten) > limit*100 {
-		t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
-			limit, atomic.LoadInt64(nWritten))
- }
-}
-
-// TestClientWriteShutdown tests that if the client shuts down the write
-// side of their TCP connection, the server doesn't send a 400 Bad Request.
-func TestClientWriteShutdown(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
- defer ts.Close()
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- err = conn.(*net.TCPConn).CloseWrite()
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- donec := make(chan bool)
- go func() {
- defer close(donec)
- bs, err := ioutil.ReadAll(conn)
- if err != nil {
- t.Fatalf("ReadAll: %v", err)
- }
- got := string(bs)
- if got != "" {
- t.Errorf("read %q from server; want nothing", got)
- }
- }()
- select {
- case <-donec:
- case <-time.After(10 * time.Second):
- t.Fatalf("timeout")
- }
-}
-
-// Tests that chunked server responses that write 1 byte at a time are
-// buffered before chunk headers are added, not after chunk headers.
-func TestServerBufferedChunking(t *testing.T) {
- conn := new(testConn)
- conn.readBuf.Write([]byte("GET / HTTP/1.1\r\n\r\n"))
- conn.closec = make(chan bool, 1)
- ls := &oneConnListener{conn}
- go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
- rw.(Flusher).Flush() // force the Header to be sent, in chunking mode, not counting the length
- rw.Write([]byte{'x'})
- rw.Write([]byte{'y'})
- rw.Write([]byte{'z'})
- }))
- <-conn.closec
- if !bytes.HasSuffix(conn.writeBuf.Bytes(), []byte("\r\n\r\n3\r\nxyz\r\n0\r\n\r\n")) {
- t.Errorf("response didn't end with a single 3 byte 'xyz' chunk; got:\n%q",
- conn.writeBuf.Bytes())
- }
-}
-
-// Tests that the server flushes its response headers out when it's
-// ignoring the response body and waits a bit before forcefully
-// closing the TCP connection, causing the client to get a RST.
-// See http://golang.org/issue/3595
-func TestServerGracefulClose(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- Error(w, "bye", StatusUnauthorized)
- }))
- defer ts.Close()
-
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- defer conn.Close()
- const bodySize = 5 << 20
- req := []byte(fmt.Sprintf("POST / HTTP/1.1\r\nHost: foo.com\r\nContent-Length: %d\r\n\r\n", bodySize))
- for i := 0; i < bodySize; i++ {
- req = append(req, 'x')
- }
- writeErr := make(chan error)
- go func() {
- _, err := conn.Write(req)
- writeErr <- err
- }()
- br := bufio.NewReader(conn)
- lineNum := 0
- for {
- line, err := br.ReadString('\n')
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Fatalf("ReadLine: %v", err)
- }
- lineNum++
- if lineNum == 1 && !strings.Contains(line, "401 Unauthorized") {
- t.Errorf("Response line = %q; want a 401", line)
- }
- }
- // Wait for write to finish. This is a broken pipe on both
- // Darwin and Linux, but checking this isn't the point of
- // the test.
- <-writeErr
-}
-
-func TestCaseSensitiveMethod(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.Method != "get" {
- t.Errorf(`Got method %q; want "get"`, r.Method)
- }
- }))
- defer ts.Close()
- req, _ := NewRequest("get", ts.URL, nil)
- res, err := DefaultClient.Do(req)
- if err != nil {
- t.Error(err)
- return
- }
- res.Body.Close()
-}
-
-// TestContentLengthZero tests that for both an HTTP/1.0 and HTTP/1.1
-// request (both keep-alive), when a Handler never writes any
-// response, the net/http package adds a "Content-Length: 0" response
-// header.
-func TestContentLengthZero(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {}))
- defer ts.Close()
-
- for _, version := range []string{"HTTP/1.0", "HTTP/1.1"} {
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("error dialing: %v", err)
- }
- _, err = fmt.Fprintf(conn, "GET / %v\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n", version)
- if err != nil {
- t.Fatalf("error writing: %v", err)
- }
- req, _ := NewRequest("GET", "/", nil)
- res, err := ReadResponse(bufio.NewReader(conn), req)
- if err != nil {
- t.Fatalf("error reading response: %v", err)
- }
- if te := res.TransferEncoding; len(te) > 0 {
- t.Errorf("For version %q, Transfer-Encoding = %q; want none", version, te)
- }
- if cl := res.ContentLength; cl != 0 {
- t.Errorf("For version %q, Content-Length = %v; want 0", version, cl)
- }
- conn.Close()
- }
-}
-
-func TestCloseNotifier(t *testing.T) {
- gotReq := make(chan bool, 1)
- sawClose := make(chan bool, 1)
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
- gotReq <- true
- cc := rw.(CloseNotifier).CloseNotify()
- <-cc
- sawClose <- true
- }))
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("error dialing: %v", err)
- }
- diec := make(chan bool)
- go func() {
- _, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n")
- if err != nil {
- t.Fatal(err)
- }
- <-diec
- conn.Close()
- }()
-For:
- for {
- select {
- case <-gotReq:
- diec <- true
- case <-sawClose:
- break For
- case <-time.After(5 * time.Second):
- t.Fatal("timeout")
- }
- }
- ts.Close()
-}
-
-func TestOptions(t *testing.T) {
- uric := make(chan string, 2) // only expect 1, but leave space for 2
- mux := NewServeMux()
- mux.HandleFunc("/", func(w ResponseWriter, r *Request) {
- uric <- r.RequestURI
- })
- ts := httptest.NewServer(mux)
- defer ts.Close()
-
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- defer conn.Close()
-
- // An OPTIONS * request should succeed.
- _, err = conn.Write([]byte("OPTIONS * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
- if err != nil {
- t.Fatal(err)
- }
- br := bufio.NewReader(conn)
- res, err := ReadResponse(br, &Request{Method: "OPTIONS"})
- if err != nil {
- t.Fatal(err)
- }
- if res.StatusCode != 200 {
- t.Errorf("Got non-200 response to OPTIONS *: %#v", res)
- }
-
- // A GET * request on a ServeMux should fail.
- _, err = conn.Write([]byte("GET * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
- if err != nil {
- t.Fatal(err)
- }
- res, err = ReadResponse(br, &Request{Method: "GET"})
- if err != nil {
- t.Fatal(err)
- }
- if res.StatusCode != 400 {
- t.Errorf("Got non-400 response to GET *: %#v", res)
- }
-
- res, err = Get(ts.URL + "/second")
- if err != nil {
- t.Fatal(err)
- }
- res.Body.Close()
- if got := <-uric; got != "/second" {
- t.Errorf("Handler saw request for %q; want /second", got)
- }
-}
-
-// goTimeout runs f, failing t if f takes more than ns to complete.
-func goTimeout(t *testing.T, d time.Duration, f func()) {
- ch := make(chan bool, 2)
- timer := time.AfterFunc(d, func() {
- t.Errorf("Timeout expired after %v", d)
- ch <- true
- })
- defer timer.Stop()
- go func() {
- defer func() { ch <- true }()
- f()
- }()
- <-ch
-}
-
-type errorListener struct {
- errs []error
-}
-
-func (l *errorListener) Accept() (c net.Conn, err error) {
- if len(l.errs) == 0 {
- return nil, io.EOF
- }
- err = l.errs[0]
- l.errs = l.errs[1:]
- return
-}
-
-func (l *errorListener) Close() error {
- return nil
-}
-
-func (l *errorListener) Addr() net.Addr {
- return dummyAddr("test-address")
-}
-
-func TestAcceptMaxFds(t *testing.T) {
- log.SetOutput(ioutil.Discard) // is noisy otherwise
- defer log.SetOutput(os.Stderr)
-
- ln := &errorListener{[]error{
- &net.OpError{
- Op: "accept",
- Err: syscall.EMFILE,
- }}}
-	err := Serve(ln, HandlerFunc(func(ResponseWriter, *Request) {}))
- if err != io.EOF {
- t.Errorf("got error %v, want EOF", err)
- }
-}
-
-func BenchmarkClientServer(b *testing.B) {
- b.StopTimer()
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
- fmt.Fprintf(rw, "Hello world.\n")
- }))
- defer ts.Close()
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- res, err := Get(ts.URL)
- if err != nil {
- b.Fatal("Get:", err)
- }
- all, err := ioutil.ReadAll(res.Body)
- if err != nil {
- b.Fatal("ReadAll:", err)
- }
- body := string(all)
- if body != "Hello world.\n" {
- b.Fatal("Got body:", body)
- }
- }
-
- b.StopTimer()
-}
-
-func BenchmarkClientServerParallel4(b *testing.B) {
- benchmarkClientServerParallel(b, 4)
-}
-
-func BenchmarkClientServerParallel64(b *testing.B) {
- benchmarkClientServerParallel(b, 64)
-}
-
-func benchmarkClientServerParallel(b *testing.B, conc int) {
- b.StopTimer()
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
- fmt.Fprintf(rw, "Hello world.\n")
- }))
- defer ts.Close()
- b.StartTimer()
-
- numProcs := runtime.GOMAXPROCS(-1) * conc
- var wg sync.WaitGroup
- wg.Add(numProcs)
- n := int32(b.N)
- for p := 0; p < numProcs; p++ {
- go func() {
- for atomic.AddInt32(&n, -1) >= 0 {
- res, err := Get(ts.URL)
- if err != nil {
- b.Logf("Get: %v", err)
- continue
- }
- all, err := ioutil.ReadAll(res.Body)
- if err != nil {
- b.Logf("ReadAll: %v", err)
- continue
- }
- body := string(all)
- if body != "Hello world.\n" {
- panic("Got body: " + body)
- }
- }
- wg.Done()
- }()
- }
- wg.Wait()
-}
-
-// A benchmark for profiling the server without the HTTP client code.
-// The client code runs in a subprocess.
-//
-// For use like:
-// $ go test -c
-// $ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15s -test.cpuprofile=http.prof
-// $ go tool pprof http.test http.prof
-// (pprof) web
-func BenchmarkServer(b *testing.B) {
-	// Child process mode.
- if url := os.Getenv("TEST_BENCH_SERVER_URL"); url != "" {
- n, err := strconv.Atoi(os.Getenv("TEST_BENCH_CLIENT_N"))
- if err != nil {
- panic(err)
- }
- for i := 0; i < n; i++ {
- res, err := Get(url)
- if err != nil {
- log.Panicf("Get: %v", err)
- }
- all, err := ioutil.ReadAll(res.Body)
- if err != nil {
- log.Panicf("ReadAll: %v", err)
- }
- body := string(all)
- if body != "Hello world.\n" {
- log.Panicf("Got body: %q", body)
- }
- }
- os.Exit(0)
- return
- }
-
- var res = []byte("Hello world.\n")
- b.StopTimer()
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.Write(res)
- }))
- defer ts.Close()
- b.StartTimer()
-
- cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer")
- cmd.Env = append([]string{
- fmt.Sprintf("TEST_BENCH_CLIENT_N=%d", b.N),
- fmt.Sprintf("TEST_BENCH_SERVER_URL=%s", ts.URL),
- }, os.Environ()...)
- out, err := cmd.CombinedOutput()
- if err != nil {
- b.Errorf("Test failure: %v, with output: %s", err, out)
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/server.go b/gcc-4.8.1/libgo/go/net/http/server.go
deleted file mode 100644
index 434943d49..000000000
--- a/gcc-4.8.1/libgo/go/net/http/server.go
+++ /dev/null
@@ -1,1537 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP server. See RFC 2616.
-
-// TODO(rsc):
-// logging
-
-package http
-
-import (
- "bufio"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/url"
- "path"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// Errors introduced by the HTTP server.
-var (
- ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
- ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
- ErrHijacked = errors.New("Conn has been hijacked")
- ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length")
-)
-
-// Objects implementing the Handler interface can be
-// registered to serve a particular path or subtree
-// in the HTTP server.
-//
-// ServeHTTP should write reply headers and data to the ResponseWriter
-// and then return. Returning signals that the request is finished
-// and that the HTTP server can move on to the next request on
-// the connection.
-type Handler interface {
- ServeHTTP(ResponseWriter, *Request)
-}
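
A rough illustration of the interface, written from the point of view of a program that imports net/http; the countHandler type, the /count path, and the :8080 address are invented for this sketch and are not part of the package:

package main

import (
	"fmt"
	"log"
	"net/http"
	"sync"
)

// countHandler implements http.Handler; each request increments a counter.
type countHandler struct {
	mu sync.Mutex
	n  int
}

func (h *countHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.mu.Lock()
	h.n++
	n := h.n
	h.mu.Unlock()
	fmt.Fprintf(w, "request #%d\n", n)
}

func main() {
	http.Handle("/count", &countHandler{})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
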
-
-// A ResponseWriter interface is used by an HTTP handler to
-// construct an HTTP response.
-type ResponseWriter interface {
- // Header returns the header map that will be sent by WriteHeader.
- // Changing the header after a call to WriteHeader (or Write) has
- // no effect.
- Header() Header
-
- // Write writes the data to the connection as part of an HTTP reply.
- // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
- // before writing the data. If the Header does not contain a
- // Content-Type line, Write adds a Content-Type set to the result of passing
- // the initial 512 bytes of written data to DetectContentType.
- Write([]byte) (int, error)
-
- // WriteHeader sends an HTTP response header with status code.
- // If WriteHeader is not called explicitly, the first call to Write
- // will trigger an implicit WriteHeader(http.StatusOK).
- // Thus explicit calls to WriteHeader are mainly used to
- // send error codes.
- WriteHeader(int)
-}
-
-// The Flusher interface is implemented by ResponseWriters that allow
-// an HTTP handler to flush buffered data to the client.
-//
-// Note that even for ResponseWriters that support Flush,
-// if the client is connected through an HTTP proxy,
-// the buffered data may not reach the client until the response
-// completes.
-type Flusher interface {
- // Flush sends any buffered data to the client.
- Flush()
-}
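
A hedged sketch of a handler using Flusher to push partial output to the client as it is produced; the /stream path and :8080 address are illustrative, and the type assertion guards against ResponseWriters that do not support flushing:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func stream(w http.ResponseWriter, r *http.Request) {
	f, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 1; i <= 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		f.Flush() // push the buffered chunk to the client now
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/stream", stream)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
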
-
-// The Hijacker interface is implemented by ResponseWriters that allow
-// an HTTP handler to take over the connection.
-type Hijacker interface {
- // Hijack lets the caller take over the connection.
- // After a call to Hijack(), the HTTP server library
- // will not do anything else with the connection.
- // It becomes the caller's responsibility to manage
- // and close the connection.
- Hijack() (net.Conn, *bufio.ReadWriter, error)
-}
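
A minimal sketch of a handler that hijacks its connection and writes to the client directly; the /raw path, the :8080 address, and the hand-written response bytes are assumptions made for illustration only:

package main

import (
	"io"
	"log"
	"net/http"
)

func raw(w http.ResponseWriter, r *http.Request) {
	hj, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
		return
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close() // the server no longer manages this connection
	io.WriteString(bufrw, "HTTP/1.0 200 OK\r\nContent-Length: 3\r\n\r\nhi\n")
	bufrw.Flush()
}

func main() {
	http.HandleFunc("/raw", raw)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
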
-
-// The CloseNotifier interface is implemented by ResponseWriters which
-// allow detecting when the underlying connection has gone away.
-//
-// This mechanism can be used to cancel long operations on the server
-// if the client has disconnected before the response is ready.
-type CloseNotifier interface {
- // CloseNotify returns a channel that receives a single value
- // when the client connection has gone away.
- CloseNotify() <-chan bool
-}
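
A possible use of CloseNotifier, sketched under the assumption that the handler's long-running work can simply be abandoned; the /slow path, the ten-second stand-in for real work, and the :8080 address are illustrative:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func slow(w http.ResponseWriter, r *http.Request) {
	cn, ok := w.(http.CloseNotifier)
	if !ok {
		http.Error(w, "close notification unsupported", http.StatusInternalServerError)
		return
	}
	select {
	case <-cn.CloseNotify():
		log.Println("client went away; abandoning work")
	case <-time.After(10 * time.Second): // stand-in for a long computation
		fmt.Fprintln(w, "done")
	}
}

func main() {
	http.HandleFunc("/slow", slow)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
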
-
-// A conn represents the server side of an HTTP connection.
-type conn struct {
- remoteAddr string // network address of remote side
- server *Server // the Server on which the connection arrived
- rwc net.Conn // i/o connection
- sr switchReader // where the LimitReader reads from; usually the rwc
- lr *io.LimitedReader // io.LimitReader(sr)
- buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc
- tlsState *tls.ConnectionState // or nil when not using TLS
-
- mu sync.Mutex // guards the following
- clientGone bool // if client has disconnected mid-request
- closeNotifyc chan bool // made lazily
- hijackedv bool // connection has been hijacked by handler
-}
-
-func (c *conn) hijacked() bool {
- c.mu.Lock()
- defer c.mu.Unlock()
- return c.hijackedv
-}
-
-func (c *conn) hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.hijackedv {
- return nil, nil, ErrHijacked
- }
- if c.closeNotifyc != nil {
- return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier")
- }
- c.hijackedv = true
- rwc = c.rwc
- buf = c.buf
- c.rwc = nil
- c.buf = nil
- return
-}
-
-func (c *conn) closeNotify() <-chan bool {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.closeNotifyc == nil {
- c.closeNotifyc = make(chan bool)
- if c.hijackedv {
- // to obey the function signature, even though
- // it'll never receive a value.
- return c.closeNotifyc
- }
- pr, pw := io.Pipe()
-
- readSource := c.sr.r
- c.sr.Lock()
- c.sr.r = pr
- c.sr.Unlock()
- go func() {
- _, err := io.Copy(pw, readSource)
- if err == nil {
- err = io.EOF
- }
- pw.CloseWithError(err)
- c.noteClientGone()
- }()
- }
- return c.closeNotifyc
-}
-
-func (c *conn) noteClientGone() {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.closeNotifyc != nil && !c.clientGone {
- c.closeNotifyc <- true
- }
- c.clientGone = true
-}
-
-type switchReader struct {
- sync.Mutex
- r io.Reader
-}
-
-func (sr *switchReader) Read(p []byte) (n int, err error) {
- sr.Lock()
- r := sr.r
- sr.Unlock()
- return r.Read(p)
-}
-
-// This should be >= 512 bytes for DetectContentType,
-// but otherwise it's somewhat arbitrary.
-const bufferBeforeChunkingSize = 2048
-
-// chunkWriter writes to a response's conn buffer, and is the writer
-// wrapped by the response.bufw buffered writer.
-//
-// chunkWriter also is responsible for finalizing the Header, including
-// conditionally setting the Content-Type and setting a Content-Length
-// in cases where the handler's final output is smaller than the buffer
-// size. It also conditionally adds chunk headers, when in chunking mode.
-//
-// See the comment above (*response).Write for the entire write flow.
-type chunkWriter struct {
- res *response
- header Header // a deep copy of r.Header, once WriteHeader is called
- wroteHeader bool // whether the header's been sent
-
- // set by the writeHeader method:
- chunking bool // using chunked transfer encoding for reply body
-}
-
-var crlf = []byte("\r\n")
-
-func (cw *chunkWriter) Write(p []byte) (n int, err error) {
- if !cw.wroteHeader {
- cw.writeHeader(p)
- }
- if cw.chunking {
- _, err = fmt.Fprintf(cw.res.conn.buf, "%x\r\n", len(p))
- if err != nil {
- return
- }
- }
- n, err = cw.res.conn.buf.Write(p)
- if cw.chunking && err == nil {
- _, err = cw.res.conn.buf.Write(crlf)
- }
- return
-}
-
-func (cw *chunkWriter) flush() {
- if !cw.wroteHeader {
- cw.writeHeader(nil)
- }
- cw.res.conn.buf.Flush()
-}
-
-func (cw *chunkWriter) close() {
- if !cw.wroteHeader {
- cw.writeHeader(nil)
- }
- if cw.chunking {
- // zero EOF chunk, trailer key/value pairs (currently
- // unsupported in Go's server), followed by a blank
- // line.
- io.WriteString(cw.res.conn.buf, "0\r\n\r\n")
- }
-}
-
-// A response represents the server side of an HTTP response.
-type response struct {
- conn *conn
- req *Request // request for this response
- wroteHeader bool // reply header has been (logically) written
- wroteContinue bool // 100 Continue response was written
-
- w *bufio.Writer // buffers output in chunks to chunkWriter
- cw *chunkWriter
-
- // handlerHeader is the Header that Handlers get access to,
- // which may be retained and mutated even after WriteHeader.
- // handlerHeader is copied into cw.header at WriteHeader
- // time, and privately mutated thereafter.
- handlerHeader Header
-
- written int64 // number of bytes written in body
- contentLength int64 // explicitly-declared Content-Length; or -1
- status int // status code passed to WriteHeader
-
- // close connection after this reply. set on request and
- // updated after response from handler if there's a
- // "Connection: keep-alive" response header and a
- // Content-Length.
- closeAfterReply bool
-
- // requestBodyLimitHit is set by requestTooLarge when
- // maxBytesReader hits its max size. It is checked in
- // WriteHeader, to make sure we don't consume the
- // remaining request body to try to advance to the next HTTP
- // request. Instead, when this is set, we stop reading
- // subsequent requests on this connection and stop reading
- // input from it.
- requestBodyLimitHit bool
-
- handlerDone bool // set true when the handler exits
-}
-
-// requestTooLarge is called by maxBytesReader when too much input has
-// been read from the client.
-func (w *response) requestTooLarge() {
- w.closeAfterReply = true
- w.requestBodyLimitHit = true
- if !w.wroteHeader {
- w.Header().Set("Connection", "close")
- }
-}
-
-// needsSniff returns whether a Content-Type still needs to be sniffed.
-func (w *response) needsSniff() bool {
- return !w.cw.wroteHeader && w.handlerHeader.Get("Content-Type") == "" && w.written < sniffLen
-}
-
-type writerOnly struct {
- io.Writer
-}
-
-func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
-
- if w.needsSniff() {
- n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
- n += n0
- if err != nil {
- return n, err
- }
- }
-
- w.w.Flush() // get rid of any previous writes
- w.cw.flush() // make sure Header is written; flush data to rwc
-
- // Now that cw has been flushed, its chunking field is guaranteed initialized.
- if !w.cw.chunking && w.bodyAllowed() {
- if rf, ok := w.conn.rwc.(io.ReaderFrom); ok {
- n0, err := rf.ReadFrom(src)
- n += n0
- w.written += n0
- return n, err
- }
- }
-
- // Fall back to default io.Copy implementation.
- // Use wrapper to hide w.ReadFrom from io.Copy.
- n0, err := io.Copy(writerOnly{w}, src)
- n += n0
- return n, err
-}
-
-// noLimit is an effective infinite upper bound for io.LimitedReader
-const noLimit int64 = (1 << 63) - 1
-
-// debugServerConnections controls whether all server connections are wrapped
-// with a verbose logging wrapper.
-const debugServerConnections = false
-
-// Create new connection from rwc.
-func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
- c = new(conn)
- c.remoteAddr = rwc.RemoteAddr().String()
- c.server = srv
- c.rwc = rwc
- if debugServerConnections {
- c.rwc = newLoggingConn("server", c.rwc)
- }
- c.sr = switchReader{r: c.rwc}
- c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader)
- br := bufio.NewReader(c.lr)
- bw := bufio.NewWriter(c.rwc)
- c.buf = bufio.NewReadWriter(br, bw)
- return c, nil
-}
-
-// DefaultMaxHeaderBytes is the maximum permitted size of the headers
-// in an HTTP request.
-// This can be overridden by setting Server.MaxHeaderBytes.
-const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
-
-func (srv *Server) maxHeaderBytes() int {
- if srv.MaxHeaderBytes > 0 {
- return srv.MaxHeaderBytes
- }
- return DefaultMaxHeaderBytes
-}
-
-// expectContinueReader is a wrapper around io.ReadCloser which, on first
-// read, sends an HTTP/1.1 100 Continue header.
-type expectContinueReader struct {
- resp *response
- readCloser io.ReadCloser
- closed bool
-}
-
-func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
- if ecr.closed {
- return 0, ErrBodyReadAfterClose
- }
- if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
- ecr.resp.wroteContinue = true
- io.WriteString(ecr.resp.conn.buf, "HTTP/1.1 100 Continue\r\n\r\n")
- ecr.resp.conn.buf.Flush()
- }
- return ecr.readCloser.Read(p)
-}
-
-func (ecr *expectContinueReader) Close() error {
- ecr.closed = true
- return ecr.readCloser.Close()
-}
-
-// TimeFormat is the time format to use with
-// time.Parse and time.Time.Format when parsing
-// or generating times in HTTP headers.
-// It is like time.RFC1123 but hard codes GMT as the time zone.
-const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
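
A small sketch showing TimeFormat used in both directions; the Last-Modified label is only an example of a header that carries such a timestamp:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Header values must be rendered in UTC; TimeFormat hard-codes "GMT".
	stamp := time.Now().UTC().Format(http.TimeFormat)
	fmt.Println("Last-Modified:", stamp)

	// Parsing goes the other way.
	if t, err := time.Parse(http.TimeFormat, stamp); err == nil {
		fmt.Println("parsed:", t)
	}
}
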
-
-var errTooLarge = errors.New("http: request too large")
-
-// Read next request from connection.
-func (c *conn) readRequest() (w *response, err error) {
- if c.hijacked() {
- return nil, ErrHijacked
- }
- c.lr.N = int64(c.server.maxHeaderBytes()) + 4096 /* bufio slop */
- var req *Request
- if req, err = ReadRequest(c.buf.Reader); err != nil {
- if c.lr.N == 0 {
- return nil, errTooLarge
- }
- return nil, err
- }
- c.lr.N = noLimit
-
- req.RemoteAddr = c.remoteAddr
- req.TLS = c.tlsState
-
- w = &response{
- conn: c,
- req: req,
- handlerHeader: make(Header),
- contentLength: -1,
- cw: new(chunkWriter),
- }
- w.cw.res = w
- w.w = bufio.NewWriterSize(w.cw, bufferBeforeChunkingSize)
- return w, nil
-}
-
-func (w *response) Header() Header {
- return w.handlerHeader
-}
-
-// maxPostHandlerReadBytes is the max number of Request.Body bytes not
-// consumed by a handler that the server will read from the client
-// in order to keep a connection alive. If there are more bytes than
-// this, the server, to be paranoid, instead sends a "Connection:
-// close" response.
-//
-// This number is approximately what a typical machine's TCP buffer
-// size is anyway. (if we have the bytes on the machine, we might as
-// well read them)
-const maxPostHandlerReadBytes = 256 << 10
-
-func (w *response) WriteHeader(code int) {
- if w.conn.hijacked() {
- log.Print("http: response.WriteHeader on hijacked connection")
- return
- }
- if w.wroteHeader {
- log.Print("http: multiple response.WriteHeader calls")
- return
- }
- w.wroteHeader = true
- w.status = code
-
- w.cw.header = w.handlerHeader.clone()
-
- if cl := w.cw.header.get("Content-Length"); cl != "" {
- v, err := strconv.ParseInt(cl, 10, 64)
- if err == nil && v >= 0 {
- w.contentLength = v
- } else {
- log.Printf("http: invalid Content-Length of %q", cl)
- w.cw.header.Del("Content-Length")
- }
- }
-}
-
-// writeHeader finalizes the header sent to the client and writes it
-// to cw.res.conn.buf.
-//
-// p is not written by writeHeader, but is the first chunk of the body
-// that will be written. It is sniffed for a Content-Type if none is
-// set explicitly. It's also used to set the Content-Length, if the
-// total body size was small and the handler has already finished
-// running.
-func (cw *chunkWriter) writeHeader(p []byte) {
- if cw.wroteHeader {
- return
- }
- cw.wroteHeader = true
-
- w := cw.res
- code := w.status
- done := w.handlerDone
-
- // If the handler is done but never sent a Content-Length
- // response header and this is our first (and last) write, set
- // it, even to zero. This helps HTTP/1.0 clients keep their
- // "keep-alive" connections alive.
- if done && cw.header.get("Content-Length") == "" && w.req.Method != "HEAD" {
- w.contentLength = int64(len(p))
- cw.header.Set("Content-Length", strconv.Itoa(len(p)))
- }
-
- // If this was an HTTP/1.0 request with keep-alive and we sent a
- // Content-Length back, we can make this a keep-alive response ...
- if w.req.wantsHttp10KeepAlive() {
- sentLength := cw.header.get("Content-Length") != ""
- if sentLength && cw.header.get("Connection") == "keep-alive" {
- w.closeAfterReply = false
- }
- }
-
-	// Check for an explicit (and valid) Content-Length header.
- hasCL := w.contentLength != -1
-
- if w.req.wantsHttp10KeepAlive() && (w.req.Method == "HEAD" || hasCL) {
- _, connectionHeaderSet := cw.header["Connection"]
- if !connectionHeaderSet {
- cw.header.Set("Connection", "keep-alive")
- }
- } else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() {
- w.closeAfterReply = true
- }
-
- if cw.header.get("Connection") == "close" {
- w.closeAfterReply = true
- }
-
- // Per RFC 2616, we should consume the request body before
- // replying, if the handler hasn't already done so. But we
- // don't want to do an unbounded amount of reading here for
- // DoS reasons, so we only try up to a threshold.
- if w.req.ContentLength != 0 && !w.closeAfterReply {
- ecr, isExpecter := w.req.Body.(*expectContinueReader)
- if !isExpecter || ecr.resp.wroteContinue {
- n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
- if n >= maxPostHandlerReadBytes {
- w.requestTooLarge()
- cw.header.Set("Connection", "close")
- } else {
- w.req.Body.Close()
- }
- }
- }
-
- if code == StatusNotModified {
- // Must not have body.
- for _, header := range []string{"Content-Type", "Content-Length", "Transfer-Encoding"} {
- // RFC 2616 section 10.3.5: "the response MUST NOT include other entity-headers"
- if cw.header.get(header) != "" {
- cw.header.Del(header)
- }
- }
- } else {
- // If no content type, apply sniffing algorithm to body.
- if cw.header.get("Content-Type") == "" && w.req.Method != "HEAD" {
- cw.header.Set("Content-Type", DetectContentType(p))
- }
- }
-
- if _, ok := cw.header["Date"]; !ok {
- cw.header.Set("Date", time.Now().UTC().Format(TimeFormat))
- }
-
- te := cw.header.get("Transfer-Encoding")
- hasTE := te != ""
- if hasCL && hasTE && te != "identity" {
- // TODO: return an error if WriteHeader gets a return parameter
- // For now just ignore the Content-Length.
- log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
- te, w.contentLength)
- cw.header.Del("Content-Length")
- hasCL = false
- }
-
- if w.req.Method == "HEAD" || code == StatusNotModified {
- // do nothing
- } else if code == StatusNoContent {
- cw.header.Del("Transfer-Encoding")
- } else if hasCL {
- cw.header.Del("Transfer-Encoding")
- } else if w.req.ProtoAtLeast(1, 1) {
- // HTTP/1.1 or greater: use chunked transfer encoding
- // to avoid closing the connection at EOF.
- // TODO: this blows away any custom or stacked Transfer-Encoding they
- // might have set. Deal with that as need arises once we have a valid
- // use case.
- cw.chunking = true
- cw.header.Set("Transfer-Encoding", "chunked")
- } else {
- // HTTP version < 1.1: cannot do chunked transfer
- // encoding and we don't know the Content-Length so
- // signal EOF by closing connection.
- w.closeAfterReply = true
- cw.header.Del("Transfer-Encoding") // in case already set
- }
-
- // Cannot use Content-Length with non-identity Transfer-Encoding.
- if cw.chunking {
- cw.header.Del("Content-Length")
- }
- if !w.req.ProtoAtLeast(1, 0) {
- return
- }
-
- if w.closeAfterReply && !hasToken(cw.header.get("Connection"), "close") {
- cw.header.Set("Connection", "close")
- }
-
- proto := "HTTP/1.0"
- if w.req.ProtoAtLeast(1, 1) {
- proto = "HTTP/1.1"
- }
- codestring := strconv.Itoa(code)
- text, ok := statusText[code]
- if !ok {
- text = "status code " + codestring
- }
- io.WriteString(w.conn.buf, proto+" "+codestring+" "+text+"\r\n")
- cw.header.Write(w.conn.buf)
- w.conn.buf.Write(crlf)
-}
-
-// bodyAllowed returns true if a Write is allowed for this response type.
-// It's illegal to call this before the header has been flushed.
-func (w *response) bodyAllowed() bool {
- if !w.wroteHeader {
- panic("")
- }
- return w.status != StatusNotModified && w.req.Method != "HEAD"
-}
-
-// The Life Of A Write is like this:
-//
-// Handler starts. No header has been sent. The handler can either
-// write a header, or just start writing. Writing before sending a header
-// implicitly sends an empty 200 OK header.
-//
-// If the handler didn't declare a Content-Length up front, we either
-// go into chunking mode or, if the handler finishes running before
-// the chunking buffer size, we compute a Content-Length and send that
-// in the header instead.
-//
-// Likewise, if the handler didn't set a Content-Type, we sniff that
-// from the initial chunk of output.
-//
-// The Writers are wired together like:
-//
-// 1. *response (the ResponseWriter) ->
-// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
-// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
-// and which writes the chunk headers, if needed.
-// 4. conn.buf, a bufio.Writer of default (4kB) bytes
-// 5. the rwc, the net.Conn.
-//
-// TODO(bradfitz): short-circuit some of the buffering when the
-// initial header contains both a Content-Type and Content-Length.
-// Also short-circuit in (1) when the header's been sent and not in
-// chunking mode, writing directly to (4) instead, if (2) has no
-// buffered data. More generally, we could short-circuit from (1) to
-// (3) even in chunking mode if the write size from (1) is over some
-// threshold and nothing is in (2). The answer might be mostly making
-// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
-// with this instead.
-func (w *response) Write(data []byte) (n int, err error) {
- if w.conn.hijacked() {
- log.Print("http: response.Write on hijacked connection")
- return 0, ErrHijacked
- }
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
- if len(data) == 0 {
- return 0, nil
- }
- if !w.bodyAllowed() {
- return 0, ErrBodyNotAllowed
- }
-
- w.written += int64(len(data)) // ignoring errors, for errorKludge
- if w.contentLength != -1 && w.written > w.contentLength {
- return 0, ErrContentLength
- }
- return w.w.Write(data)
-}
-
-func (w *response) finishRequest() {
- w.handlerDone = true
-
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
-
- w.w.Flush()
- w.cw.close()
- w.conn.buf.Flush()
-
- // Close the body, unless we're about to close the whole TCP connection
- // anyway.
- if !w.closeAfterReply {
- w.req.Body.Close()
- }
- if w.req.MultipartForm != nil {
- w.req.MultipartForm.RemoveAll()
- }
-
- if w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
- // Did not write enough. Avoid getting out of sync.
- w.closeAfterReply = true
- }
-}
-
-func (w *response) Flush() {
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
- w.w.Flush()
- w.cw.flush()
-}
-
-func (c *conn) finalFlush() {
- if c.buf != nil {
- c.buf.Flush()
- c.buf = nil
- }
-}
-
-// Close the connection.
-func (c *conn) close() {
- c.finalFlush()
- if c.rwc != nil {
- c.rwc.Close()
- c.rwc = nil
- }
-}
-
-// rstAvoidanceDelay is the amount of time we sleep after closing the
-// write side of a TCP connection before closing the entire socket.
-// By sleeping, we increase the chances that the client sees our FIN
-// and processes its final data before they process the subsequent RST
-// from closing a connection with known unread data.
-// This RST seems to occur mostly on BSD systems. (And Windows?)
-// This timeout is somewhat arbitrary (~latency around the planet).
-const rstAvoidanceDelay = 500 * time.Millisecond
-
-// closeWriteAndWait flushes any outstanding data and sends a FIN packet (if
-// the client is connected via TCP), signalling that we're done. We then
-// pause for a bit, hoping the client processes it before any
-// subsequent RST.
-//
-// See http://golang.org/issue/3595
-func (c *conn) closeWriteAndWait() {
- c.finalFlush()
- if tcp, ok := c.rwc.(*net.TCPConn); ok {
- tcp.CloseWrite()
- }
- time.Sleep(rstAvoidanceDelay)
-}
-
-// Serve a new connection.
-func (c *conn) serve() {
- defer func() {
- if err := recover(); err != nil {
- const size = 4096
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- log.Printf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
- }
- if !c.hijacked() {
- c.close()
- }
- }()
-
- if tlsConn, ok := c.rwc.(*tls.Conn); ok {
- if err := tlsConn.Handshake(); err != nil {
- return
- }
- c.tlsState = new(tls.ConnectionState)
- *c.tlsState = tlsConn.ConnectionState()
- }
-
- for {
- w, err := c.readRequest()
- if err != nil {
- if err == errTooLarge {
- // Their HTTP client may or may not be
- // able to read this if we're
- // responding to them and hanging up
- // while they're still writing their
- // request. Undefined behavior.
- io.WriteString(c.rwc, "HTTP/1.1 413 Request Entity Too Large\r\n\r\n")
- c.closeWriteAndWait()
- break
- } else if err == io.EOF {
- break // Don't reply
- } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
- break // Don't reply
- }
- io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\n\r\n")
- break
- }
-
- // Expect 100 Continue support
- req := w.req
- if req.expectsContinue() {
- if req.ProtoAtLeast(1, 1) {
- // Wrap the Body reader with one that replies on the connection
- req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
- }
- if req.ContentLength == 0 {
- w.Header().Set("Connection", "close")
- w.WriteHeader(StatusBadRequest)
- w.finishRequest()
- break
- }
- req.Header.Del("Expect")
- } else if req.Header.get("Expect") != "" {
- w.sendExpectationFailed()
- break
- }
-
- handler := c.server.Handler
- if handler == nil {
- handler = DefaultServeMux
- }
- if req.RequestURI == "*" && req.Method == "OPTIONS" {
- handler = globalOptionsHandler{}
- }
-
- // HTTP cannot have multiple simultaneous active requests.[*]
- // Until the server replies to this request, it can't read another,
- // so we might as well run the handler in this goroutine.
- // [*] Not strictly true: HTTP pipelining. We could let them all process
- // in parallel even if their responses need to be serialized.
- handler.ServeHTTP(w, w.req)
- if c.hijacked() {
- return
- }
- w.finishRequest()
- if w.closeAfterReply {
- if w.requestBodyLimitHit {
- c.closeWriteAndWait()
- }
- break
- }
- }
-}
-
-func (w *response) sendExpectationFailed() {
- // TODO(bradfitz): let ServeHTTP handlers handle
- // requests with non-standard expectation[s]? Seems
- // theoretical at best, and doesn't fit into the
- // current ServeHTTP model anyway. We'd need to
- // make the ResponseWriter an optional
- // "ExpectReplier" interface or something.
- //
- // For now we'll just obey RFC 2616 14.20 which says
- // "If a server receives a request containing an
- // Expect field that includes an expectation-
- // extension that it does not support, it MUST
- // respond with a 417 (Expectation Failed) status."
- w.Header().Set("Connection", "close")
- w.WriteHeader(StatusExpectationFailed)
- w.finishRequest()
-}
-
-// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
-// and a Hijacker.
-func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
- if w.wroteHeader {
- w.cw.flush()
- }
- return w.conn.hijack()
-}
-
-func (w *response) CloseNotify() <-chan bool {
- return w.conn.closeNotify()
-}
-
-// The HandlerFunc type is an adapter to allow the use of
-// ordinary functions as HTTP handlers. If f is a function
-// with the appropriate signature, HandlerFunc(f) is a
-// Handler object that calls f.
-type HandlerFunc func(ResponseWriter, *Request)
-
-// ServeHTTP calls f(w, r).
-func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
- f(w, r)
-}
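
A brief sketch of the adapter in use; the hello function, the /hello path, and the :8080 address are illustrative:

package main

import (
	"io"
	"log"
	"net/http"
)

// hello is an ordinary function with the ServeHTTP signature.
func hello(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "hello\n")
}

func main() {
	// The conversion turns the function into an http.Handler.
	var h http.Handler = http.HandlerFunc(hello)
	http.Handle("/hello", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
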
-
-// Helper handlers
-
-// Error replies to the request with the specified error message and HTTP code.
-func Error(w ResponseWriter, error string, code int) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.WriteHeader(code)
- fmt.Fprintln(w, error)
-}
-
-// NotFound replies to the request with an HTTP 404 not found error.
-func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
-
-// NotFoundHandler returns a simple request handler
-// that replies to each request with a ``404 page not found'' reply.
-func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
-
-// StripPrefix returns a handler that serves HTTP requests
-// by removing the given prefix from the request URL's Path
-// and invoking the handler h. StripPrefix handles a
-// request for a path that doesn't begin with prefix by
-// replying with an HTTP 404 not found error.
-func StripPrefix(prefix string, h Handler) Handler {
- return HandlerFunc(func(w ResponseWriter, r *Request) {
- if !strings.HasPrefix(r.URL.Path, prefix) {
- NotFound(w, r)
- return
- }
- r.URL.Path = r.URL.Path[len(prefix):]
- h.ServeHTTP(w, r)
- })
-}
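
A hedged sketch combining StripPrefix with the package's FileServer; the ./assets directory and the /static/ prefix are assumptions made for the example:

package main

import (
	"log"
	"net/http"
)

func main() {
	// Requests for /static/css/site.css are served from ./assets/css/site.css.
	fs := http.FileServer(http.Dir("./assets"))
	http.Handle("/static/", http.StripPrefix("/static/", fs))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
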
-
-// Redirect replies to the request with a redirect to url,
-// which may be a path relative to the request path.
-func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
- if u, err := url.Parse(urlStr); err == nil {
- // If url was relative, make absolute by
- // combining with request path.
- // The browser would probably do this for us,
- // but doing it ourselves is more reliable.
-
- // NOTE(rsc): RFC 2616 says that the Location
- // line must be an absolute URI, like
- // "http://www.google.com/redirect/",
- // not a path like "/redirect/".
- // Unfortunately, we don't know what to
- // put in the host name section to get the
- // client to connect to us again, so we can't
- // know the right absolute URI to send back.
- // Because of this problem, no one pays attention
- // to the RFC; they all send back just a new path.
- // So do we.
- oldpath := r.URL.Path
- if oldpath == "" { // should not happen, but avoid a crash if it does
- oldpath = "/"
- }
- if u.Scheme == "" {
- // no leading http://server
- if urlStr == "" || urlStr[0] != '/' {
- // make relative path absolute
- olddir, _ := path.Split(oldpath)
- urlStr = olddir + urlStr
- }
-
- var query string
- if i := strings.Index(urlStr, "?"); i != -1 {
- urlStr, query = urlStr[:i], urlStr[i:]
- }
-
- // clean up but preserve trailing slash
- trailing := urlStr[len(urlStr)-1] == '/'
- urlStr = path.Clean(urlStr)
- if trailing && urlStr[len(urlStr)-1] != '/' {
- urlStr += "/"
- }
- urlStr += query
- }
- }
-
- w.Header().Set("Location", urlStr)
- w.WriteHeader(code)
-
- // RFC2616 recommends that a short note "SHOULD" be included in the
- // response because older user agents may not understand 301/307.
- // Shouldn't send the response for POST or HEAD; that leaves GET.
- if r.Method == "GET" {
- note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
- fmt.Fprintln(w, note)
- }
-}
-
-var htmlReplacer = strings.NewReplacer(
- "&", "&amp;",
- "<", "&lt;",
- ">", "&gt;",
- // "&#34;" is shorter than "&quot;".
- `"`, "&#34;",
- // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
- "'", "&#39;",
-)
-
-func htmlEscape(s string) string {
- return htmlReplacer.Replace(s)
-}
-
-// Redirect to a fixed URL
-type redirectHandler struct {
- url string
- code int
-}
-
-func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
- Redirect(w, r, rh.url, rh.code)
-}
-
-// RedirectHandler returns a request handler that redirects
-// each request it receives to the given url using the given
-// status code.
-func RedirectHandler(url string, code int) Handler {
- return &redirectHandler{url, code}
-}
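
A short sketch showing both the fixed-URL handler and the Redirect helper; the paths, target URLs, and :8080 address are illustrative:

package main

import (
	"log"
	"net/http"
)

func main() {
	// A fixed redirect for a retired path.
	http.Handle("/old", http.RedirectHandler("/new", http.StatusMovedPermanently))

	// A computed redirect, relative to the request path.
	http.HandleFunc("/latest", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "v1.0/", http.StatusFound)
	})

	http.HandleFunc("/new", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("new home\n"))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
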
-
-// ServeMux is an HTTP request multiplexer.
-// It matches the URL of each incoming request against a list of registered
-// patterns and calls the handler for the pattern that
-// most closely matches the URL.
-//
-// Patterns name fixed, rooted paths, like "/favicon.ico",
-// or rooted subtrees, like "/images/" (note the trailing slash).
-// Longer patterns take precedence over shorter ones, so that
-// if there are handlers registered for both "/images/"
-// and "/images/thumbnails/", the latter handler will be
-// called for paths beginning "/images/thumbnails/" and the
-// former will receive requests for any other paths in the
-// "/images/" subtree.
-//
-// Patterns may optionally begin with a host name, restricting matches to
-// URLs on that host only. Host-specific patterns take precedence over
-// general patterns, so that a handler might register for the two patterns
-// "/codesearch" and "codesearch.google.com/" without also taking over
-// requests for "http://www.google.com/".
-//
-// ServeMux also takes care of sanitizing the URL request path,
-// redirecting any request containing . or .. elements to an
-// equivalent .- and ..-free URL.
-type ServeMux struct {
- mu sync.RWMutex
- m map[string]muxEntry
- hosts bool // whether any patterns contain hostnames
-}
-
-type muxEntry struct {
- explicit bool
- h Handler
- pattern string
-}
-
-// NewServeMux allocates and returns a new ServeMux.
-func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} }
-
-// DefaultServeMux is the default ServeMux used by Serve.
-var DefaultServeMux = NewServeMux()
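
A sketch of the matching rules described above; the /images/ and /images/thumbnails/ patterns mirror the documentation, while img.example.com and :8080 are invented for the example:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()

	// "/images/" names a rooted subtree; "/images/thumbnails/" is longer
	// and therefore wins for paths underneath it.
	mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "generic image handler:", r.URL.Path)
	})
	mux.HandleFunc("/images/thumbnails/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "thumbnail handler:", r.URL.Path)
	})

	// A host-specific pattern takes precedence, but only for that host.
	mux.HandleFunc("img.example.com/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "img.example.com handler")
	})

	log.Fatal(http.ListenAndServe(":8080", mux))
}
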
-
-// Does path match pattern?
-func pathMatch(pattern, path string) bool {
- if len(pattern) == 0 {
- // should not happen
- return false
- }
- n := len(pattern)
- if pattern[n-1] != '/' {
- return pattern == path
- }
- return len(path) >= n && path[0:n] == pattern
-}
-
-// Return the canonical path for p, eliminating . and .. elements.
-func cleanPath(p string) string {
- if p == "" {
- return "/"
- }
- if p[0] != '/' {
- p = "/" + p
- }
- np := path.Clean(p)
- // path.Clean removes trailing slash except for root;
- // put the trailing slash back if necessary.
- if p[len(p)-1] == '/' && np != "/" {
- np += "/"
- }
- return np
-}
-
-// Find a handler on a handler map given a path string
-// Most-specific (longest) pattern wins
-func (mux *ServeMux) match(path string) (h Handler, pattern string) {
- var n = 0
- for k, v := range mux.m {
- if !pathMatch(k, path) {
- continue
- }
- if h == nil || len(k) > n {
- n = len(k)
- h = v.h
- pattern = v.pattern
- }
- }
- return
-}
-
-// Handler returns the handler to use for the given request,
-// consulting r.Method, r.Host, and r.URL.Path. It always returns
-// a non-nil handler. If the path is not in its canonical form, the
-// handler will be an internally-generated handler that redirects
-// to the canonical path.
-//
-// Handler also returns the registered pattern that matches the
-// request or, in the case of internally-generated redirects,
-// the pattern that will match after following the redirect.
-//
-// If there is no registered handler that applies to the request,
-// Handler returns a ``page not found'' handler and an empty pattern.
-func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
- if r.Method != "CONNECT" {
- if p := cleanPath(r.URL.Path); p != r.URL.Path {
- _, pattern = mux.handler(r.Host, p)
- return RedirectHandler(p, StatusMovedPermanently), pattern
- }
- }
-
- return mux.handler(r.Host, r.URL.Path)
-}
-
-// handler is the main implementation of Handler.
-// The path is known to be in canonical form, except for CONNECT methods.
-func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
- mux.mu.RLock()
- defer mux.mu.RUnlock()
-
- // Host-specific pattern takes precedence over generic ones
- if mux.hosts {
- h, pattern = mux.match(host + path)
- }
- if h == nil {
- h, pattern = mux.match(path)
- }
- if h == nil {
- h, pattern = NotFoundHandler(), ""
- }
- return
-}
-
-// ServeHTTP dispatches the request to the handler whose
-// pattern most closely matches the request URL.
-func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
- if r.RequestURI == "*" {
- w.Header().Set("Connection", "close")
- w.WriteHeader(StatusBadRequest)
- return
- }
- h, _ := mux.Handler(r)
- h.ServeHTTP(w, r)
-}
-
-// Handle registers the handler for the given pattern.
-// If a handler already exists for pattern, Handle panics.
-func (mux *ServeMux) Handle(pattern string, handler Handler) {
- mux.mu.Lock()
- defer mux.mu.Unlock()
-
- if pattern == "" {
- panic("http: invalid pattern " + pattern)
- }
- if handler == nil {
- panic("http: nil handler")
- }
- if mux.m[pattern].explicit {
- panic("http: multiple registrations for " + pattern)
- }
-
- mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern}
-
- if pattern[0] != '/' {
- mux.hosts = true
- }
-
- // Helpful behavior:
- // If pattern is /tree/, insert an implicit permanent redirect for /tree.
- // It can be overridden by an explicit registration.
- n := len(pattern)
- if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit {
- // If pattern contains a host name, strip it and use remaining
- // path for redirect.
- path := pattern
- if pattern[0] != '/' {
- // In pattern, at least the last character is a '/', so
- // strings.Index can't be -1.
- path = pattern[strings.Index(pattern, "/"):]
- }
- mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(path, StatusMovedPermanently), pattern: pattern}
- }
-}
-
-// HandleFunc registers the handler function for the given pattern.
-func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
- mux.Handle(pattern, HandlerFunc(handler))
-}
-
-// Handle registers the handler for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
-func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
-
-// HandleFunc registers the handler function for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
-func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
- DefaultServeMux.HandleFunc(pattern, handler)
-}
-
-// Serve accepts incoming HTTP connections on the listener l,
-// creating a new service goroutine for each. The service goroutines
-// read requests and then call handler to reply to them.
-// Handler is typically nil, in which case the DefaultServeMux is used.
-func Serve(l net.Listener, handler Handler) error {
- srv := &Server{Handler: handler}
- return srv.Serve(l)
-}
-
-// A Server defines parameters for running an HTTP server.
-type Server struct {
- Addr string // TCP address to listen on, ":http" if empty
- Handler Handler // handler to invoke, http.DefaultServeMux if nil
- ReadTimeout time.Duration // maximum duration before timing out read of the request
- WriteTimeout time.Duration // maximum duration before timing out write of the response
- MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0
- TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS
-}
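
A minimal sketch of constructing a Server explicitly rather than relying on the package-level ListenAndServe; the address and timeout values are arbitrary choices for the example:

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:           ":8080",
		Handler:        nil, // nil means http.DefaultServeMux
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	log.Fatal(srv.ListenAndServe())
}
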
-
-// ListenAndServe listens on the TCP network address srv.Addr and then
-// calls Serve to handle requests on incoming connections. If
-// srv.Addr is blank, ":http" is used.
-func (srv *Server) ListenAndServe() error {
- addr := srv.Addr
- if addr == "" {
- addr = ":http"
- }
- l, e := net.Listen("tcp", addr)
- if e != nil {
- return e
- }
- return srv.Serve(l)
-}
-
-// Serve accepts incoming connections on the Listener l, creating a
-// new service goroutine for each. The service goroutines read requests and
-// then call srv.Handler to reply to them.
-func (srv *Server) Serve(l net.Listener) error {
- defer l.Close()
- var tempDelay time.Duration // how long to sleep on accept failure
- for {
- rw, e := l.Accept()
- if e != nil {
- if ne, ok := e.(net.Error); ok && ne.Temporary() {
- if tempDelay == 0 {
- tempDelay = 5 * time.Millisecond
- } else {
- tempDelay *= 2
- }
- if max := 1 * time.Second; tempDelay > max {
- tempDelay = max
- }
- log.Printf("http: Accept error: %v; retrying in %v", e, tempDelay)
- time.Sleep(tempDelay)
- continue
- }
- return e
- }
- tempDelay = 0
- if srv.ReadTimeout != 0 {
- rw.SetReadDeadline(time.Now().Add(srv.ReadTimeout))
- }
- if srv.WriteTimeout != 0 {
- rw.SetWriteDeadline(time.Now().Add(srv.WriteTimeout))
- }
- c, err := srv.newConn(rw)
- if err != nil {
- continue
- }
- go c.serve()
- }
- panic("not reached")
-}
-
-// ListenAndServe listens on the TCP network address addr
-// and then calls Serve with handler to handle requests
-// on incoming connections. Handler is typically nil,
-// in which case the DefaultServeMux is used.
-//
-// A trivial example server is:
-//
-// package main
-//
-// import (
-// "io"
-// "net/http"
-// "log"
-// )
-//
-// // hello world, the web server
-// func HelloServer(w http.ResponseWriter, req *http.Request) {
-// io.WriteString(w, "hello, world!\n")
-// }
-//
-// func main() {
-// http.HandleFunc("/hello", HelloServer)
-// err := http.ListenAndServe(":12345", nil)
-// if err != nil {
-// log.Fatal("ListenAndServe: ", err)
-// }
-// }
-func ListenAndServe(addr string, handler Handler) error {
- server := &Server{Addr: addr, Handler: handler}
- return server.ListenAndServe()
-}
-
-// ListenAndServeTLS acts identically to ListenAndServe, except that it
-// expects HTTPS connections. Additionally, files containing a certificate and
-// matching private key for the server must be provided. If the certificate
-// is signed by a certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
-//
-// A trivial example server is:
-//
-// import (
-// "log"
-// "net/http"
-// )
-//
-// func handler(w http.ResponseWriter, req *http.Request) {
-// w.Header().Set("Content-Type", "text/plain")
-// w.Write([]byte("This is an example server.\n"))
-// }
-//
-// func main() {
-// http.HandleFunc("/", handler)
-// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/")
-// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil)
-// if err != nil {
-// log.Fatal(err)
-// }
-// }
-//
-// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
-func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error {
- server := &Server{Addr: addr, Handler: handler}
- return server.ListenAndServeTLS(certFile, keyFile)
-}
-
-// ListenAndServeTLS listens on the TCP network address srv.Addr and
-// then calls Serve to handle requests on incoming TLS connections.
-//
-// Filenames containing a certificate and matching private key for
-// the server must be provided. If the certificate is signed by a
-// certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
-//
-// If srv.Addr is blank, ":https" is used.
-func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
- addr := srv.Addr
- if addr == "" {
- addr = ":https"
- }
- config := &tls.Config{}
- if srv.TLSConfig != nil {
- *config = *srv.TLSConfig
- }
- if config.NextProtos == nil {
- config.NextProtos = []string{"http/1.1"}
- }
-
- var err error
- config.Certificates = make([]tls.Certificate, 1)
- config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return err
- }
-
- conn, err := net.Listen("tcp", addr)
- if err != nil {
- return err
- }
-
- tlsListener := tls.NewListener(conn, config)
- return srv.Serve(tlsListener)
-}
-
-// TimeoutHandler returns a Handler that runs h with the given time limit.
-//
-// The new Handler calls h.ServeHTTP to handle each request, but if a
-// call runs for longer than its time limit, the handler responds with
-// a 503 Service Unavailable error and the given message in its body.
-// (If msg is empty, a suitable default message will be sent.)
-// After such a timeout, writes by h to its ResponseWriter will return
-// ErrHandlerTimeout.
-func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
- f := func() <-chan time.Time {
- return time.After(dt)
- }
- return &timeoutHandler{h, f, msg}
-}
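
A hedged sketch wrapping a deliberately slow handler; the /work path, the one-second limit, and the :8080 address are illustrative:

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(2 * time.Second) // pretend this is real work
		w.Write([]byte("finished\n"))
	})

	// Requests taking longer than one second get a 503 with the given body.
	http.Handle("/work", http.TimeoutHandler(slow, time.Second, "request timed out"))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
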
-
-// ErrHandlerTimeout is returned on ResponseWriter Write calls
-// in handlers which have timed out.
-var ErrHandlerTimeout = errors.New("http: Handler timeout")
-
-type timeoutHandler struct {
- handler Handler
- timeout func() <-chan time.Time // returns channel producing a timeout
- body string
-}
-
-func (h *timeoutHandler) errorBody() string {
- if h.body != "" {
- return h.body
- }
- return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
-}
-
-func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
- done := make(chan bool)
- tw := &timeoutWriter{w: w}
- go func() {
- h.handler.ServeHTTP(tw, r)
- done <- true
- }()
- select {
- case <-done:
- return
- case <-h.timeout():
- tw.mu.Lock()
- defer tw.mu.Unlock()
- if !tw.wroteHeader {
- tw.w.WriteHeader(StatusServiceUnavailable)
- tw.w.Write([]byte(h.errorBody()))
- }
- tw.timedOut = true
- }
-}
-
-type timeoutWriter struct {
- w ResponseWriter
-
- mu sync.Mutex
- timedOut bool
- wroteHeader bool
-}
-
-func (tw *timeoutWriter) Header() Header {
- return tw.w.Header()
-}
-
-func (tw *timeoutWriter) Write(p []byte) (int, error) {
- tw.mu.Lock()
- timedOut := tw.timedOut
- tw.mu.Unlock()
- if timedOut {
- return 0, ErrHandlerTimeout
- }
- return tw.w.Write(p)
-}
-
-func (tw *timeoutWriter) WriteHeader(code int) {
- tw.mu.Lock()
- if tw.timedOut || tw.wroteHeader {
- tw.mu.Unlock()
- return
- }
- tw.wroteHeader = true
- tw.mu.Unlock()
- tw.w.WriteHeader(code)
-}
-
-// globalOptionsHandler responds to "OPTIONS *" requests.
-type globalOptionsHandler struct{}
-
-func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Length", "0")
- if r.ContentLength != 0 {
- // Read up to 4KB of OPTIONS body (as mentioned in the
- // spec as being reserved for future use), but anything
- // over that is considered a waste of server resources
- // (or an attack) and we abort and close the connection,
- // courtesy of MaxBytesReader's EOF behavior.
- mb := MaxBytesReader(w, r.Body, 4<<10)
- io.Copy(ioutil.Discard, mb)
- }
-}
-
-// loggingConn is used for debugging.
-type loggingConn struct {
- name string
- net.Conn
-}
-
-var (
- uniqNameMu sync.Mutex
- uniqNameNext = make(map[string]int)
-)
-
-func newLoggingConn(baseName string, c net.Conn) net.Conn {
- uniqNameMu.Lock()
- defer uniqNameMu.Unlock()
- uniqNameNext[baseName]++
- return &loggingConn{
- name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
- Conn: c,
- }
-}
-
-func (c *loggingConn) Write(p []byte) (n int, err error) {
- log.Printf("%s.Write(%d) = ....", c.name, len(p))
- n, err = c.Conn.Write(p)
- log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
- return
-}
-
-func (c *loggingConn) Read(p []byte) (n int, err error) {
- log.Printf("%s.Read(%d) = ....", c.name, len(p))
- n, err = c.Conn.Read(p)
- log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
- return
-}
-
-func (c *loggingConn) Close() (err error) {
- log.Printf("%s.Close() = ...", c.name)
- err = c.Conn.Close()
- log.Printf("%s.Close() = %v", c.name, err)
- return
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/server_test.go b/gcc-4.8.1/libgo/go/net/http/server_test.go
deleted file mode 100644
index 8b4e8c6d6..000000000
--- a/gcc-4.8.1/libgo/go/net/http/server_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "net/url"
- "testing"
-)
-
-var serveMuxRegister = []struct {
- pattern string
- h Handler
-}{
- {"/dir/", serve(200)},
- {"/search", serve(201)},
- {"codesearch.google.com/search", serve(202)},
- {"codesearch.google.com/", serve(203)},
-}
-
-// serve returns a handler that sends a response with the given code.
-func serve(code int) HandlerFunc {
- return func(w ResponseWriter, r *Request) {
- w.WriteHeader(code)
- }
-}
-
-var serveMuxTests = []struct {
- method string
- host string
- path string
- code int
- pattern string
-}{
- {"GET", "google.com", "/", 404, ""},
- {"GET", "google.com", "/dir", 301, "/dir/"},
- {"GET", "google.com", "/dir/", 200, "/dir/"},
- {"GET", "google.com", "/dir/file", 200, "/dir/"},
- {"GET", "google.com", "/search", 201, "/search"},
- {"GET", "google.com", "/search/", 404, ""},
- {"GET", "google.com", "/search/foo", 404, ""},
- {"GET", "codesearch.google.com", "/search", 202, "codesearch.google.com/search"},
- {"GET", "codesearch.google.com", "/search/", 203, "codesearch.google.com/"},
- {"GET", "codesearch.google.com", "/search/foo", 203, "codesearch.google.com/"},
- {"GET", "codesearch.google.com", "/", 203, "codesearch.google.com/"},
- {"GET", "images.google.com", "/search", 201, "/search"},
- {"GET", "images.google.com", "/search/", 404, ""},
- {"GET", "images.google.com", "/search/foo", 404, ""},
- {"GET", "google.com", "/../search", 301, "/search"},
- {"GET", "google.com", "/dir/..", 301, ""},
- {"GET", "google.com", "/dir/..", 301, ""},
- {"GET", "google.com", "/dir/./file", 301, "/dir/"},
-
- // The /foo -> /foo/ redirect applies to CONNECT requests
- // but the path canonicalization does not.
- {"CONNECT", "google.com", "/dir", 301, "/dir/"},
- {"CONNECT", "google.com", "/../search", 404, ""},
- {"CONNECT", "google.com", "/dir/..", 200, "/dir/"},
- {"CONNECT", "google.com", "/dir/..", 200, "/dir/"},
- {"CONNECT", "google.com", "/dir/./file", 200, "/dir/"},
-}
-
-func TestServeMuxHandler(t *testing.T) {
- mux := NewServeMux()
- for _, e := range serveMuxRegister {
- mux.Handle(e.pattern, e.h)
- }
-
- for _, tt := range serveMuxTests {
- r := &Request{
- Method: tt.method,
- Host: tt.host,
- URL: &url.URL{
- Path: tt.path,
- },
- }
- h, pattern := mux.Handler(r)
- cs := &codeSaver{h: Header{}}
- h.ServeHTTP(cs, r)
- if pattern != tt.pattern || cs.code != tt.code {
- t.Errorf("%s %s %s = %d, %q, want %d, %q", tt.method, tt.host, tt.path, cs.code, pattern, tt.code, tt.pattern)
- }
- }
-}
-
-// A codeSaver is a ResponseWriter that saves the code passed to WriteHeader.
-type codeSaver struct {
- h Header
- code int
-}
-
-func (cs *codeSaver) Header() Header { return cs.h }
-func (cs *codeSaver) Write(p []byte) (int, error) { return len(p), nil }
-func (cs *codeSaver) WriteHeader(code int) { cs.code = code }
diff --git a/gcc-4.8.1/libgo/go/net/http/sniff.go b/gcc-4.8.1/libgo/go/net/http/sniff.go
deleted file mode 100644
index 68f519b05..000000000
--- a/gcc-4.8.1/libgo/go/net/http/sniff.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "encoding/binary"
-)
-
-// The algorithm uses at most sniffLen bytes to make its decision.
-const sniffLen = 512
-
-// DetectContentType implements the algorithm described
-// at http://mimesniff.spec.whatwg.org/ to determine the
-// Content-Type of the given data. It considers at most the
-// first 512 bytes of data. DetectContentType always returns
-// a valid MIME type: if it cannot determine a more specific one, it
-// returns "application/octet-stream".
-func DetectContentType(data []byte) string {
- if len(data) > sniffLen {
- data = data[:sniffLen]
- }
-
- // Index of the first non-whitespace byte in data.
- firstNonWS := 0
- for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
- }
-
- for _, sig := range sniffSignatures {
- if ct := sig.match(data, firstNonWS); ct != "" {
- return ct
- }
- }
-
- return "application/octet-stream" // fallback
-}
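
A small usage sketch; the sample byte slices are invented, chosen to hit the HTML, PDF, and PNG entries in the signature table that follows:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	samples := [][]byte{
		[]byte("<!DOCTYPE HTML><html><body>hi</body></html>"),
		[]byte("%PDF-1.4 ..."),
		{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}, // PNG magic bytes
	}
	for _, b := range samples {
		fmt.Println(http.DetectContentType(b))
	}
}
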
-
-func isWS(b byte) bool {
- return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1
-}
-
-type sniffSig interface {
- // match returns the MIME type of the data, or "" if unknown.
- match(data []byte, firstNonWS int) string
-}
-
-// Data matching the table in section 6.
-var sniffSignatures = []sniffSig{
- htmlSig("<!DOCTYPE HTML"),
- htmlSig("<HTML"),
- htmlSig("<HEAD"),
- htmlSig("<SCRIPT"),
- htmlSig("<IFRAME"),
- htmlSig("<H1"),
- htmlSig("<DIV"),
- htmlSig("<FONT"),
- htmlSig("<TABLE"),
- htmlSig("<A"),
- htmlSig("<STYLE"),
- htmlSig("<TITLE"),
- htmlSig("<B"),
- htmlSig("<BODY"),
- htmlSig("<BR"),
- htmlSig("<P"),
- htmlSig("<!--"),
-
- &maskedSig{mask: []byte("\xFF\xFF\xFF\xFF\xFF"), pat: []byte("<?xml"), skipWS: true, ct: "text/xml; charset=utf-8"},
-
- &exactSig{[]byte("%PDF-"), "application/pdf"},
- &exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},
-
- // UTF BOMs.
- &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFE\xFF\x00\x00"), ct: "text/plain; charset=utf-16be"},
- &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFF\xFE\x00\x00"), ct: "text/plain; charset=utf-16le"},
- &maskedSig{mask: []byte("\xFF\xFF\xFF\x00"), pat: []byte("\xEF\xBB\xBF\x00"), ct: "text/plain; charset=utf-8"},
-
- &exactSig{[]byte("GIF87a"), "image/gif"},
- &exactSig{[]byte("GIF89a"), "image/gif"},
- &exactSig{[]byte("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), "image/png"},
- &exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
- &exactSig{[]byte("BM"), "image/bmp"},
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
- pat: []byte("RIFF\x00\x00\x00\x00WEBPVP"),
- ct: "image/webp",
- },
- &exactSig{[]byte("\x00\x00\x01\x00"), "image/vnd.microsoft.icon"},
- &exactSig{[]byte("\x4F\x67\x67\x53\x00"), "application/ogg"},
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
- pat: []byte("RIFF\x00\x00\x00\x00WAVE"),
- ct: "audio/wave",
- },
- &exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},
- &exactSig{[]byte("\x52\x61\x72\x20\x1A\x07\x00"), "application/x-rar-compressed"},
- &exactSig{[]byte("\x50\x4B\x03\x04"), "application/zip"},
- &exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},
-
- // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
- //mp4Sig(0),
-
- textSig(0), // should be last
-}
-
-type exactSig struct {
- sig []byte
- ct string
-}
-
-func (e *exactSig) match(data []byte, firstNonWS int) string {
- if bytes.HasPrefix(data, e.sig) {
- return e.ct
- }
- return ""
-}
-
-type maskedSig struct {
- mask, pat []byte
- skipWS bool
- ct string
-}
-
-func (m *maskedSig) match(data []byte, firstNonWS int) string {
- if m.skipWS {
- data = data[firstNonWS:]
- }
- if len(data) < len(m.mask) {
- return ""
- }
- for i, mask := range m.mask {
- db := data[i] & mask
- if db != m.pat[i] {
- return ""
- }
- }
- return m.ct
-}
-
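
The masked matcher above ANDs each data byte with its mask byte before comparing it to the pattern, which is how variable bytes (such as RIFF chunk sizes) are skipped. A small standalone illustration using the WAVE entry from the signature table (maskedMatch is an illustrative re-implementation, not the package's API):

    package main

    import "fmt"

    // maskedMatch mirrors maskedSig.match: every data byte ANDed with
    // its mask byte must equal the corresponding pattern byte.
    func maskedMatch(data, mask, pat []byte) bool {
        if len(data) < len(mask) {
            return false
        }
        for i := range mask {
            if data[i]&mask[i] != pat[i] {
                return false
            }
        }
        return true
    }

    func main() {
        mask := []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF")
        pat := []byte("RIFF\x00\x00\x00\x00WAVE")
        wav := []byte("RIFF\x24\x08\x00\x00WAVEfmt ") // the four size bytes are masked out
        fmt.Println(maskedMatch(wav, mask, pat))      // true
    }
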
-type htmlSig []byte
-
-func (h htmlSig) match(data []byte, firstNonWS int) string {
- data = data[firstNonWS:]
- if len(data) < len(h)+1 {
- return ""
- }
- for i, b := range h {
- db := data[i]
- if 'A' <= b && b <= 'Z' {
- db &= 0xDF
- }
- if b != db {
- return ""
- }
- }
- // Next byte must be space or right angle bracket.
- if db := data[len(h)]; db != ' ' && db != '>' {
- return ""
- }
- return "text/html; charset=utf-8"
-}
-
-type mp4Sig int
-
-func (mp4Sig) match(data []byte, firstNonWS int) string {
- // c.f. section 6.1.
- if len(data) < 8 {
- return ""
- }
- boxSize := int(binary.BigEndian.Uint32(data[:4]))
- if boxSize%4 != 0 || len(data) < boxSize {
- return ""
- }
- if !bytes.Equal(data[4:8], []byte("ftyp")) {
- return ""
- }
- for st := 8; st < boxSize; st += 4 {
- if st == 12 {
- // minor version number
- continue
- }
- seg := string(data[st : st+3])
- switch seg {
- case "mp4", "iso", "M4V", "M4P", "M4B":
- return "video/mp4"
- /* The remainder are not in the spec.
- case "M4A":
- return "audio/mp4"
- case "3gp":
- return "video/3gpp"
- case "jp2":
- return "image/jp2" // JPEG 2000
- */
- }
- }
- return ""
-}
-
-type textSig int
-
-func (textSig) match(data []byte, firstNonWS int) string {
- // c.f. section 5, step 4.
- for _, b := range data[firstNonWS:] {
- switch {
- case 0x00 <= b && b <= 0x08,
- b == 0x0B,
- 0x0E <= b && b <= 0x1A,
- 0x1C <= b && b <= 0x1F:
- return ""
- }
- }
- return "text/plain; charset=utf-8"
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/sniff_test.go b/gcc-4.8.1/libgo/go/net/http/sniff_test.go
deleted file mode 100644
index 8ab72ac23..000000000
--- a/gcc-4.8.1/libgo/go/net/http/sniff_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http_test
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- . "net/http"
- "net/http/httptest"
- "strconv"
- "strings"
- "testing"
-)
-
-var sniffTests = []struct {
- desc string
- data []byte
- contentType string
-}{
- // Some nonsense.
- {"Empty", []byte{}, "text/plain; charset=utf-8"},
- {"Binary", []byte{1, 2, 3}, "application/octet-stream"},
-
- {"HTML document #1", []byte(`<HtMl><bOdY>blah blah blah</body></html>`), "text/html; charset=utf-8"},
- {"HTML document #2", []byte(`<HTML></HTML>`), "text/html; charset=utf-8"},
- {"HTML document #3 (leading whitespace)", []byte(` <!DOCTYPE HTML>...`), "text/html; charset=utf-8"},
- {"HTML document #4 (leading CRLF)", []byte("\r\n<html>..."), "text/html; charset=utf-8"},
-
-	{"Plain text", []byte(`This is not HTML. It has ☃ though.`), "text/plain; charset=utf-8"},
-
- {"XML", []byte("\n<?xml!"), "text/xml; charset=utf-8"},
-
- // Image types.
- {"GIF 87a", []byte(`GIF87a`), "image/gif"},
- {"GIF 89a", []byte(`GIF89a...`), "image/gif"},
-
- // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
- //{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"},
- //{"MP4 audio", []byte("\x00\x00\x00\x20ftypM4A \x00\x00\x00\x00M4A mp42isom\x00\x00\x00\x00"), "audio/mp4"},
-}
-
-func TestDetectContentType(t *testing.T) {
- for _, tt := range sniffTests {
- ct := DetectContentType(tt.data)
- if ct != tt.contentType {
- t.Errorf("%v: DetectContentType = %q, want %q", tt.desc, ct, tt.contentType)
- }
- }
-}
-
-func TestServerContentType(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- i, _ := strconv.Atoi(r.FormValue("i"))
- tt := sniffTests[i]
- n, err := w.Write(tt.data)
- if n != len(tt.data) || err != nil {
- log.Fatalf("%v: Write(%q) = %v, %v want %d, nil", tt.desc, tt.data, n, err, len(tt.data))
- }
- }))
- defer ts.Close()
-
- for i, tt := range sniffTests {
- resp, err := Get(ts.URL + "/?i=" + strconv.Itoa(i))
- if err != nil {
- t.Errorf("%v: %v", tt.desc, err)
- continue
- }
- if ct := resp.Header.Get("Content-Type"); ct != tt.contentType {
- t.Errorf("%v: Content-Type = %q, want %q", tt.desc, ct, tt.contentType)
- }
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Errorf("%v: reading body: %v", tt.desc, err)
- } else if !bytes.Equal(data, tt.data) {
- t.Errorf("%v: data is %q, want %q", tt.desc, data, tt.data)
- }
- resp.Body.Close()
- }
-}
-
-func TestContentTypeWithCopy(t *testing.T) {
- const (
- input = "\n<html>\n\t<head>\n"
- expected = "text/html; charset=utf-8"
- )
-
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- // Use io.Copy from a bytes.Buffer to trigger ReadFrom.
- buf := bytes.NewBuffer([]byte(input))
- n, err := io.Copy(w, buf)
- if int(n) != len(input) || err != nil {
- t.Errorf("io.Copy(w, %q) = %v, %v want %d, nil", input, n, err, len(input))
- }
- }))
- defer ts.Close()
-
- resp, err := Get(ts.URL)
- if err != nil {
- t.Fatalf("Get: %v", err)
- }
- if ct := resp.Header.Get("Content-Type"); ct != expected {
- t.Errorf("Content-Type = %q, want %q", ct, expected)
- }
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Errorf("reading body: %v", err)
- } else if !bytes.Equal(data, []byte(input)) {
- t.Errorf("data is %q, want %q", data, input)
- }
- resp.Body.Close()
-}
-
-func TestSniffWriteSize(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- size, _ := strconv.Atoi(r.FormValue("size"))
- written, err := io.WriteString(w, strings.Repeat("a", size))
- if err != nil {
- t.Errorf("write of %d bytes: %v", size, err)
- return
- }
- if written != size {
- t.Errorf("write of %d bytes wrote %d bytes", size, written)
- }
- }))
- defer ts.Close()
- for _, size := range []int{0, 1, 200, 600, 999, 1000, 1023, 1024, 512 << 10, 1 << 20} {
- res, err := Get(fmt.Sprintf("%s/?size=%d", ts.URL, size))
- if err != nil {
- t.Fatalf("size %d: %v", size, err)
- }
- res.Body.Close()
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/status.go b/gcc-4.8.1/libgo/go/net/http/status.go
deleted file mode 100644
index 5af0b77c4..000000000
--- a/gcc-4.8.1/libgo/go/net/http/status.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-// HTTP status codes, defined in RFC 2616.
-const (
- StatusContinue = 100
- StatusSwitchingProtocols = 101
-
- StatusOK = 200
- StatusCreated = 201
- StatusAccepted = 202
- StatusNonAuthoritativeInfo = 203
- StatusNoContent = 204
- StatusResetContent = 205
- StatusPartialContent = 206
-
- StatusMultipleChoices = 300
- StatusMovedPermanently = 301
- StatusFound = 302
- StatusSeeOther = 303
- StatusNotModified = 304
- StatusUseProxy = 305
- StatusTemporaryRedirect = 307
-
- StatusBadRequest = 400
- StatusUnauthorized = 401
- StatusPaymentRequired = 402
- StatusForbidden = 403
- StatusNotFound = 404
- StatusMethodNotAllowed = 405
- StatusNotAcceptable = 406
- StatusProxyAuthRequired = 407
- StatusRequestTimeout = 408
- StatusConflict = 409
- StatusGone = 410
- StatusLengthRequired = 411
- StatusPreconditionFailed = 412
- StatusRequestEntityTooLarge = 413
- StatusRequestURITooLong = 414
- StatusUnsupportedMediaType = 415
- StatusRequestedRangeNotSatisfiable = 416
- StatusExpectationFailed = 417
- StatusTeapot = 418
-
- StatusInternalServerError = 500
- StatusNotImplemented = 501
- StatusBadGateway = 502
- StatusServiceUnavailable = 503
- StatusGatewayTimeout = 504
- StatusHTTPVersionNotSupported = 505
-)
-
-var statusText = map[int]string{
- StatusContinue: "Continue",
- StatusSwitchingProtocols: "Switching Protocols",
-
- StatusOK: "OK",
- StatusCreated: "Created",
- StatusAccepted: "Accepted",
- StatusNonAuthoritativeInfo: "Non-Authoritative Information",
- StatusNoContent: "No Content",
- StatusResetContent: "Reset Content",
- StatusPartialContent: "Partial Content",
-
- StatusMultipleChoices: "Multiple Choices",
- StatusMovedPermanently: "Moved Permanently",
- StatusFound: "Found",
- StatusSeeOther: "See Other",
- StatusNotModified: "Not Modified",
- StatusUseProxy: "Use Proxy",
- StatusTemporaryRedirect: "Temporary Redirect",
-
- StatusBadRequest: "Bad Request",
- StatusUnauthorized: "Unauthorized",
- StatusPaymentRequired: "Payment Required",
- StatusForbidden: "Forbidden",
- StatusNotFound: "Not Found",
- StatusMethodNotAllowed: "Method Not Allowed",
- StatusNotAcceptable: "Not Acceptable",
- StatusProxyAuthRequired: "Proxy Authentication Required",
- StatusRequestTimeout: "Request Timeout",
- StatusConflict: "Conflict",
- StatusGone: "Gone",
- StatusLengthRequired: "Length Required",
- StatusPreconditionFailed: "Precondition Failed",
- StatusRequestEntityTooLarge: "Request Entity Too Large",
- StatusRequestURITooLong: "Request URI Too Long",
- StatusUnsupportedMediaType: "Unsupported Media Type",
- StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
- StatusExpectationFailed: "Expectation Failed",
- StatusTeapot: "I'm a teapot",
-
- StatusInternalServerError: "Internal Server Error",
- StatusNotImplemented: "Not Implemented",
- StatusBadGateway: "Bad Gateway",
- StatusServiceUnavailable: "Service Unavailable",
- StatusGatewayTimeout: "Gateway Timeout",
- StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
-}
-
-// StatusText returns a text for the HTTP status code. It returns the empty
-// string if the code is unknown.
-func StatusText(code int) string {
- return statusText[code]
-}
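
A quick sketch of how callers use the status constants and StatusText (standalone, not part of the deleted file):

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        fmt.Println(http.StatusNotFound, http.StatusText(http.StatusNotFound)) // 404 Not Found
        fmt.Println(http.StatusText(499) == "")                                // true: unknown codes map to ""
    }
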
diff --git a/gcc-4.8.1/libgo/go/net/http/testdata/file b/gcc-4.8.1/libgo/go/net/http/testdata/file
deleted file mode 100644
index 11f11f9be..000000000
--- a/gcc-4.8.1/libgo/go/net/http/testdata/file
+++ /dev/null
@@ -1 +0,0 @@
-0123456789
diff --git a/gcc-4.8.1/libgo/go/net/http/testdata/index.html b/gcc-4.8.1/libgo/go/net/http/testdata/index.html
deleted file mode 100644
index da8e1e93d..000000000
--- a/gcc-4.8.1/libgo/go/net/http/testdata/index.html
+++ /dev/null
@@ -1 +0,0 @@
-index.html says hello
diff --git a/gcc-4.8.1/libgo/go/net/http/testdata/style.css b/gcc-4.8.1/libgo/go/net/http/testdata/style.css
deleted file mode 100644
index 208d16d42..000000000
--- a/gcc-4.8.1/libgo/go/net/http/testdata/style.css
+++ /dev/null
@@ -1 +0,0 @@
-body {}
diff --git a/gcc-4.8.1/libgo/go/net/http/transfer.go b/gcc-4.8.1/libgo/go/net/http/transfer.go
deleted file mode 100644
index 25b34adde..000000000
--- a/gcc-4.8.1/libgo/go/net/http/transfer.go
+++ /dev/null
@@ -1,656 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/textproto"
- "strconv"
- "strings"
-)
-
-// transferWriter inspects the fields of a user-supplied Request or Response,
-// sanitizes them without changing the user object and provides methods for
-// writing the respective header, body and trailer in wire format.
-type transferWriter struct {
- Method string
- Body io.Reader
- BodyCloser io.Closer
- ResponseToHEAD bool
- ContentLength int64 // -1 means unknown, 0 means exactly none
- Close bool
- TransferEncoding []string
- Trailer Header
-}
-
-func newTransferWriter(r interface{}) (t *transferWriter, err error) {
- t = &transferWriter{}
-
- // Extract relevant fields
- atLeastHTTP11 := false
- switch rr := r.(type) {
- case *Request:
- if rr.ContentLength != 0 && rr.Body == nil {
- return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength)
- }
- t.Method = rr.Method
- t.Body = rr.Body
- t.BodyCloser = rr.Body
- t.ContentLength = rr.ContentLength
- t.Close = rr.Close
- t.TransferEncoding = rr.TransferEncoding
- t.Trailer = rr.Trailer
- atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
- if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 {
- if t.ContentLength == 0 {
- // Test to see if it's actually zero or just unset.
- var buf [1]byte
- n, _ := io.ReadFull(t.Body, buf[:])
- if n == 1 {
- // Oh, guess there is data in this Body Reader after all.
- // The ContentLength field just wasn't set.
-     // Stitch the Body back together again, re-attaching our
- // consumed byte.
- t.ContentLength = -1
- t.Body = io.MultiReader(bytes.NewBuffer(buf[:]), t.Body)
- } else {
- // Body is actually empty.
- t.Body = nil
- t.BodyCloser = nil
- }
- }
- if t.ContentLength < 0 {
- t.TransferEncoding = []string{"chunked"}
- }
- }
- case *Response:
- if rr.Request != nil {
- t.Method = rr.Request.Method
- }
- t.Body = rr.Body
- t.BodyCloser = rr.Body
- t.ContentLength = rr.ContentLength
- t.Close = rr.Close
- t.TransferEncoding = rr.TransferEncoding
- t.Trailer = rr.Trailer
- atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
- t.ResponseToHEAD = noBodyExpected(t.Method)
- }
-
- // Sanitize Body,ContentLength,TransferEncoding
- if t.ResponseToHEAD {
- t.Body = nil
- if chunked(t.TransferEncoding) {
- t.ContentLength = -1
- }
- } else {
- if !atLeastHTTP11 || t.Body == nil {
- t.TransferEncoding = nil
- }
- if chunked(t.TransferEncoding) {
- t.ContentLength = -1
- } else if t.Body == nil { // no chunking, no body
- t.ContentLength = 0
- }
- }
-
- // Sanitize Trailer
- if !chunked(t.TransferEncoding) {
- t.Trailer = nil
- }
-
- return t, nil
-}
-
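
One observable effect of the sanitization in newTransferWriter: an HTTP/1.1 request whose ContentLength is left unset but whose Body yields data is written with chunked transfer encoding. A hedged sketch via the exported Request.Write (the URL is a placeholder; NopCloser hides the reader's length so ContentLength stays unset):

    package main

    import (
        "io/ioutil"
        "net/http"
        "os"
        "strings"
    )

    func main() {
        // Wrapping the reader hides its length from NewRequest, so
        // req.ContentLength stays 0 ("unset") even though data exists.
        body := ioutil.NopCloser(strings.NewReader("hello"))
        req, err := http.NewRequest("POST", "http://example.com/", body)
        if err != nil {
            panic(err)
        }
        // The unknown-length body should force "Transfer-Encoding: chunked"
        // onto the wire, per the logic in newTransferWriter.
        if err := req.Write(os.Stdout); err != nil {
            panic(err)
        }
    }
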
-func noBodyExpected(requestMethod string) bool {
- return requestMethod == "HEAD"
-}
-
-func (t *transferWriter) shouldSendContentLength() bool {
- if chunked(t.TransferEncoding) {
- return false
- }
- if t.ContentLength > 0 {
- return true
- }
- // Many servers expect a Content-Length for these methods
- if t.Method == "POST" || t.Method == "PUT" {
- return true
- }
- if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
- return true
- }
-
- return false
-}
-
-func (t *transferWriter) WriteHeader(w io.Writer) (err error) {
- if t.Close {
- _, err = io.WriteString(w, "Connection: close\r\n")
- if err != nil {
- return
- }
- }
-
- // Write Content-Length and/or Transfer-Encoding whose values are a
- // function of the sanitized field triple (Body, ContentLength,
- // TransferEncoding)
- if t.shouldSendContentLength() {
- io.WriteString(w, "Content-Length: ")
- _, err = io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n")
- if err != nil {
- return
- }
- } else if chunked(t.TransferEncoding) {
- _, err = io.WriteString(w, "Transfer-Encoding: chunked\r\n")
- if err != nil {
- return
- }
- }
-
- // Write Trailer header
- if t.Trailer != nil {
- // TODO: At some point, there should be a generic mechanism for
- // writing long headers, using HTTP line splitting
- io.WriteString(w, "Trailer: ")
- needComma := false
- for k := range t.Trailer {
- k = CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return &badStringError{"invalid Trailer key", k}
- }
- if needComma {
- io.WriteString(w, ",")
- }
- io.WriteString(w, k)
- needComma = true
- }
- _, err = io.WriteString(w, "\r\n")
- }
-
- return
-}
-
-func (t *transferWriter) WriteBody(w io.Writer) (err error) {
- var ncopy int64
-
- // Write body
- if t.Body != nil {
- if chunked(t.TransferEncoding) {
- cw := newChunkedWriter(w)
- _, err = io.Copy(cw, t.Body)
- if err == nil {
- err = cw.Close()
- }
- } else if t.ContentLength == -1 {
- ncopy, err = io.Copy(w, t.Body)
- } else {
- ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength))
- nextra, err := io.Copy(ioutil.Discard, t.Body)
- if err != nil {
- return err
- }
- ncopy += nextra
- }
- if err != nil {
- return err
- }
- if err = t.BodyCloser.Close(); err != nil {
- return err
- }
- }
-
- if t.ContentLength != -1 && t.ContentLength != ncopy {
- return fmt.Errorf("http: Request.ContentLength=%d with Body length %d",
- t.ContentLength, ncopy)
- }
-
- // TODO(petar): Place trailer writer code here.
- if chunked(t.TransferEncoding) {
- // Last chunk, empty trailer
- _, err = io.WriteString(w, "\r\n")
- }
-
- return
-}
-
-type transferReader struct {
- // Input
- Header Header
- StatusCode int
- RequestMethod string
- ProtoMajor int
- ProtoMinor int
- // Output
- Body io.ReadCloser
- ContentLength int64
- TransferEncoding []string
- Close bool
- Trailer Header
-}
-
-// bodyAllowedForStatus returns whether a given response status code
-// permits a body. See RFC2616, section 4.4.
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-// msg is *Request or *Response.
-func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
- t := &transferReader{}
-
- // Unify input
- isResponse := false
- switch rr := msg.(type) {
- case *Response:
- t.Header = rr.Header
- t.StatusCode = rr.StatusCode
- t.RequestMethod = rr.Request.Method
- t.ProtoMajor = rr.ProtoMajor
- t.ProtoMinor = rr.ProtoMinor
- t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header)
- isResponse = true
- case *Request:
- t.Header = rr.Header
- t.ProtoMajor = rr.ProtoMajor
- t.ProtoMinor = rr.ProtoMinor
- // Transfer semantics for Requests are exactly like those for
- // Responses with status code 200, responding to a GET method
- t.StatusCode = 200
- t.RequestMethod = "GET"
- default:
- panic("unexpected type")
- }
-
- // Default to HTTP/1.1
- if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
- t.ProtoMajor, t.ProtoMinor = 1, 1
- }
-
- // Transfer encoding, content length
- t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header)
- if err != nil {
- return err
- }
-
- realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
- if err != nil {
- return err
- }
- if isResponse && t.RequestMethod == "HEAD" {
- if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
- return err
- } else {
- t.ContentLength = n
- }
- } else {
- t.ContentLength = realLength
- }
-
- // Trailer
- t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
- if err != nil {
- return err
- }
-
- // If there is no Content-Length or chunked Transfer-Encoding on a *Response
- // and the status is not 1xx, 204 or 304, then the body is unbounded.
- // See RFC2616, section 4.4.
- switch msg.(type) {
- case *Response:
- if realLength == -1 &&
- !chunked(t.TransferEncoding) &&
- bodyAllowedForStatus(t.StatusCode) {
- // Unbounded body.
- t.Close = true
- }
- }
-
- // Prepare body reader. ContentLength < 0 means chunked encoding
- // or close connection when finished, since multipart is not supported yet
- switch {
- case chunked(t.TransferEncoding):
- t.Body = &body{Reader: newChunkedReader(r), hdr: msg, r: r, closing: t.Close}
- case realLength >= 0:
- // TODO: limit the Content-Length. This is an easy DoS vector.
- t.Body = &body{Reader: io.LimitReader(r, realLength), closing: t.Close}
- default:
- // realLength < 0, i.e. "Content-Length" not mentioned in header
- if t.Close {
- // Close semantics (i.e. HTTP/1.0)
- t.Body = &body{Reader: r, closing: t.Close}
- } else {
- // Persistent connection (i.e. HTTP/1.1)
- t.Body = &body{Reader: io.LimitReader(r, 0), closing: t.Close}
- }
- }
-
- // Unify output
- switch rr := msg.(type) {
- case *Request:
- rr.Body = t.Body
- rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
- rr.Close = t.Close
- rr.Trailer = t.Trailer
- case *Response:
- rr.Body = t.Body
- rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
- rr.Close = t.Close
- rr.Trailer = t.Trailer
- }
-
- return nil
-}
-
-// Checks whether chunked is part of the encodings stack
-func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
-
-// Checks whether the encoding is explicitly "identity".
-func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
-
-// Sanitize transfer encoding
-func fixTransferEncoding(requestMethod string, header Header) ([]string, error) {
- raw, present := header["Transfer-Encoding"]
- if !present {
- return nil, nil
- }
-
- delete(header, "Transfer-Encoding")
-
- encodings := strings.Split(raw[0], ",")
- te := make([]string, 0, len(encodings))
- // TODO: Even though we only support "identity" and "chunked"
- // encodings, the loop below is designed with foresight. One
- // invariant that must be maintained is that, if present,
- // chunked encoding must always come first.
- for _, encoding := range encodings {
- encoding = strings.ToLower(strings.TrimSpace(encoding))
- // "identity" encoding is not recorded
- if encoding == "identity" {
- break
- }
- if encoding != "chunked" {
- return nil, &badStringError{"unsupported transfer encoding", encoding}
- }
- te = te[0 : len(te)+1]
- te[len(te)-1] = encoding
- }
- if len(te) > 1 {
- return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")}
- }
- if len(te) > 0 {
- // Chunked encoding trumps Content-Length. See RFC 2616
- // Section 4.4. Currently len(te) > 0 implies chunked
- // encoding.
- delete(header, "Content-Length")
- return te, nil
- }
-
- return nil, nil
-}
-
-// Determine the expected body length, using RFC 2616 Section 4.4. This
-// function is not a method, because ultimately it should be shared by
-// ReadResponse and ReadRequest.
-func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
-
- // Logic based on response type or status
- if noBodyExpected(requestMethod) {
- return 0, nil
- }
- if status/100 == 1 {
- return 0, nil
- }
- switch status {
- case 204, 304:
- return 0, nil
- }
-
- // Logic based on Transfer-Encoding
- if chunked(te) {
- return -1, nil
- }
-
- // Logic based on Content-Length
- cl := strings.TrimSpace(header.get("Content-Length"))
- if cl != "" {
- n, err := parseContentLength(cl)
- if err != nil {
- return -1, err
- }
- return n, nil
- } else {
- header.Del("Content-Length")
- }
-
- if !isResponse && requestMethod == "GET" {
-  // RFC 2616 neither explicitly permits nor forbids an
-  // entity-body on a GET request, so we permit one if
- // declared, but we default to 0 here (not -1 below)
- // if there's no mention of a body.
- return 0, nil
- }
-
- // Logic based on media type. The purpose of the following code is just
- // to detect whether the unsupported "multipart/byteranges" is being
- // used. A proper Content-Type parser is needed in the future.
- if strings.Contains(strings.ToLower(header.get("Content-Type")), "multipart/byteranges") {
- return -1, ErrNotSupported
- }
-
- // Body-EOF logic based on other methods (like closing, or chunked coding)
- return -1, nil
-}
-
-// Determine whether to hang up after sending a request and body, or
-// receiving a response and body.
-// 'header' is the request headers.
-func shouldClose(major, minor int, header Header) bool {
- if major < 1 {
- return true
- } else if major == 1 && minor == 0 {
- if !strings.Contains(strings.ToLower(header.get("Connection")), "keep-alive") {
- return true
- }
- return false
- } else {
- // TODO: Should split on commas, toss surrounding white space,
- // and check each field.
- if strings.ToLower(header.get("Connection")) == "close" {
- header.Del("Connection")
- return true
- }
- }
- return false
-}
-
-// Parse the trailer header
-func fixTrailer(header Header, te []string) (Header, error) {
- raw := header.get("Trailer")
- if raw == "" {
- return nil, nil
- }
-
- header.Del("Trailer")
- trailer := make(Header)
- keys := strings.Split(raw, ",")
- for _, key := range keys {
- key = CanonicalHeaderKey(strings.TrimSpace(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return nil, &badStringError{"bad trailer key", key}
- }
- trailer.Del(key)
- }
- if len(trailer) == 0 {
- return nil, nil
- }
- if !chunked(te) {
- // Trailer and no chunking
- return nil, ErrUnexpectedTrailer
- }
- return trailer, nil
-}
-
-// body turns a Reader into a ReadCloser.
-// Close ensures that the body has been fully read
-// and then reads the trailer if necessary.
-type body struct {
- io.Reader
- hdr interface{} // non-nil (Response or Request) value means read trailer
- r *bufio.Reader // underlying wire-format reader for the trailer
- closing bool // is the connection to be closed after reading body?
- closed bool
-
- res *response // response writer for server requests, else nil
-}
-
-// ErrBodyReadAfterClose is returned when reading a Request or Response
-// Body after the body has been closed. This typically happens when the body is
-// read after an HTTP Handler calls WriteHeader or Write on its
-// ResponseWriter.
-var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body")
-
-func (b *body) Read(p []byte) (n int, err error) {
- if b.closed {
- return 0, ErrBodyReadAfterClose
- }
- n, err = b.Reader.Read(p)
-
- // Read the final trailer once we hit EOF.
- if err == io.EOF && b.hdr != nil {
- if e := b.readTrailer(); e != nil {
- err = e
- }
- b.hdr = nil
- }
- return n, err
-}
-
-var (
- singleCRLF = []byte("\r\n")
- doubleCRLF = []byte("\r\n\r\n")
-)
-
-func seeUpcomingDoubleCRLF(r *bufio.Reader) bool {
- for peekSize := 4; ; peekSize++ {
- // This loop stops when Peek returns an error,
- // which it does when r's buffer has been filled.
- buf, err := r.Peek(peekSize)
- if bytes.HasSuffix(buf, doubleCRLF) {
- return true
- }
- if err != nil {
- break
- }
- }
- return false
-}
-
-var errTrailerEOF = errors.New("http: unexpected EOF reading trailer")
-
-func (b *body) readTrailer() error {
- // The common case, since nobody uses trailers.
- buf, err := b.r.Peek(2)
- if bytes.Equal(buf, singleCRLF) {
- b.r.ReadByte()
- b.r.ReadByte()
- return nil
- }
- if len(buf) < 2 {
- return errTrailerEOF
- }
- if err != nil {
- return err
- }
-
- // Make sure there's a header terminator coming up, to prevent
- // a DoS with an unbounded size Trailer. It's not easy to
- // slip in a LimitReader here, as textproto.NewReader requires
- // a concrete *bufio.Reader. Also, we can't get all the way
- // back up to our conn's LimitedReader that *might* be backing
- // this bufio.Reader. Instead, a hack: we iteratively Peek up
- // to the bufio.Reader's max size, looking for a double CRLF.
- // This limits the trailer to the underlying buffer size, typically 4kB.
- if !seeUpcomingDoubleCRLF(b.r) {
- return errors.New("http: suspiciously long trailer after chunked body")
- }
-
- hdr, err := textproto.NewReader(b.r).ReadMIMEHeader()
- if err != nil {
- if err == io.EOF {
- return errTrailerEOF
- }
- return err
- }
- switch rr := b.hdr.(type) {
- case *Request:
- rr.Trailer = Header(hdr)
- case *Response:
- rr.Trailer = Header(hdr)
- }
- return nil
-}
-
-func (b *body) Close() error {
- if b.closed {
- return nil
- }
- defer func() {
- b.closed = true
- }()
- if b.hdr == nil && b.closing {
- // no trailer and closing the connection next.
- // no point in reading to EOF.
- return nil
- }
-
- // In a server request, don't continue reading from the client
- // if we've already hit the maximum body size set by the
- // handler. If this is set, that also means the TCP connection
- // is about to be closed, so getting to the next HTTP request
- // in the stream is not necessary.
- if b.res != nil && b.res.requestBodyLimitHit {
- return nil
- }
-
- // Fully consume the body, which will also lead to us reading
- // the trailer headers after the body, if present.
- if _, err := io.Copy(ioutil.Discard, b); err != nil {
- return err
- }
- return nil
-}
-
-// parseContentLength trims whitespace from cl and returns -1 if no value
-// is set, or the value if it's >= 0.
-func parseContentLength(cl string) (int64, error) {
- cl = strings.TrimSpace(cl)
- if cl == "" {
- return -1, nil
- }
- n, err := strconv.ParseInt(cl, 10, 64)
- if err != nil || n < 0 {
- return 0, &badStringError{"bad Content-Length", cl}
- }
- return n, nil
-
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/transfer_test.go b/gcc-4.8.1/libgo/go/net/http/transfer_test.go
deleted file mode 100644
index 8627a374c..000000000
--- a/gcc-4.8.1/libgo/go/net/http/transfer_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "strings"
- "testing"
-)
-
-func TestBodyReadBadTrailer(t *testing.T) {
- b := &body{
- Reader: strings.NewReader("foobar"),
- hdr: true, // force reading the trailer
- r: bufio.NewReader(strings.NewReader("")),
- }
- buf := make([]byte, 7)
- n, err := b.Read(buf[:3])
- got := string(buf[:n])
- if got != "foo" || err != nil {
- t.Fatalf(`first Read = %d (%q), %v; want 3 ("foo")`, n, got, err)
- }
-
- n, err = b.Read(buf[:])
- got = string(buf[:n])
- if got != "bar" || err != nil {
- t.Fatalf(`second Read = %d (%q), %v; want 3 ("bar")`, n, got, err)
- }
-
- n, err = b.Read(buf[:])
- got = string(buf[:n])
- if err == nil {
- t.Errorf("final Read was successful (%q), expected error from trailer read", got)
- }
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/transport.go b/gcc-4.8.1/libgo/go/net/http/transport.go
deleted file mode 100644
index 98e198e78..000000000
--- a/gcc-4.8.1/libgo/go/net/http/transport.go
+++ /dev/null
@@ -1,906 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP client implementation. See RFC 2616.
-//
-// This is the low-level Transport implementation of RoundTripper.
-// The high-level interface is in client.go.
-
-package http
-
-import (
- "bufio"
- "compress/gzip"
- "crypto/tls"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/url"
- "os"
- "strings"
- "sync"
- "time"
-)
-
-// DefaultTransport is the default implementation of Transport and is
-// used by DefaultClient. It establishes network connections as needed
-// and caches them for reuse by subsequent calls. It uses HTTP proxies
-// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and
-// $no_proxy) environment variables.
-var DefaultTransport RoundTripper = &Transport{Proxy: ProxyFromEnvironment}
-
-// DefaultMaxIdleConnsPerHost is the default value of Transport's
-// MaxIdleConnsPerHost.
-const DefaultMaxIdleConnsPerHost = 2
-
-// Transport is an implementation of RoundTripper that supports http,
-// https, and http proxies (for either http or https with CONNECT).
-// Transport can also cache connections for future re-use.
-type Transport struct {
- idleLk sync.Mutex
- idleConn map[string][]*persistConn
- altLk sync.RWMutex
- altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper
-
- // TODO: tunable on global max cached connections
- // TODO: tunable on timeout on cached connections
- // TODO: optional pipelining
-
- // Proxy specifies a function to return a proxy for a given
- // Request. If the function returns a non-nil error, the
- // request is aborted with the provided error.
- // If Proxy is nil or returns a nil *URL, no proxy is used.
- Proxy func(*Request) (*url.URL, error)
-
- // Dial specifies the dial function for creating TCP
- // connections.
- // If Dial is nil, net.Dial is used.
- Dial func(net, addr string) (c net.Conn, err error)
-
- // TLSClientConfig specifies the TLS configuration to use with
- // tls.Client. If nil, the default configuration is used.
- TLSClientConfig *tls.Config
-
- DisableKeepAlives bool
- DisableCompression bool
-
- // MaxIdleConnsPerHost, if non-zero, controls the maximum idle
- // (keep-alive) connections to keep per-host. If zero,
- // DefaultMaxIdleConnsPerHost is used.
- MaxIdleConnsPerHost int
-}
-
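
A short configuration sketch for the exported Transport fields above (the values are arbitrary placeholders, not recommendations):

    package main

    import (
        "crypto/tls"
        "net/http"
    )

    func main() {
        tr := &http.Transport{
            MaxIdleConnsPerHost: 4,             // default is DefaultMaxIdleConnsPerHost (2)
            DisableCompression:  true,          // skip transparent gzip
            TLSClientConfig:     &tls.Config{}, // nil would also use the defaults
        }
        client := &http.Client{Transport: tr}
        _ = client // use client.Get, client.Do, ...
    }
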
-// ProxyFromEnvironment returns the URL of the proxy to use for a
-// given request, as indicated by the environment variables
-// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy).
-// An error is returned if the proxy environment is invalid.
-// A nil URL and nil error are returned if no proxy is defined in the
-// environment, or a proxy should not be used for the given request.
-func ProxyFromEnvironment(req *Request) (*url.URL, error) {
- proxy := getenvEitherCase("HTTP_PROXY")
- if proxy == "" {
- return nil, nil
- }
- if !useProxy(canonicalAddr(req.URL)) {
- return nil, nil
- }
- proxyURL, err := url.Parse(proxy)
- if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
- if u, err := url.Parse("http://" + proxy); err == nil {
- proxyURL = u
- err = nil
- }
- }
- if err != nil {
- return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
- }
- return proxyURL, nil
-}
-
-// ProxyURL returns a proxy function (for use in a Transport)
-// that always returns the same URL.
-func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
- return func(*Request) (*url.URL, error) {
- return fixedURL, nil
- }
-}
-
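
Both proxy selectors plug into Transport.Proxy; a minimal sketch (the proxy address is a placeholder):

    package main

    import (
        "log"
        "net/http"
        "net/url"
    )

    func main() {
        // Honor $HTTP_PROXY / $NO_PROXY (this is what DefaultTransport does).
        envClient := &http.Client{
            Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
        }

        // Or force one fixed proxy URL (placeholder address).
        proxyURL, err := url.Parse("http://proxy.example.com:3128")
        if err != nil {
            log.Fatal(err)
        }
        fixedClient := &http.Client{
            Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
        }

        _, _ = envClient, fixedClient // use with Get/Do as usual
    }
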
-// transportRequest is a wrapper around a *Request that adds
-// optional extra headers to write.
-type transportRequest struct {
- *Request // original request, not to be mutated
- extra Header // extra headers to write, or nil
-}
-
-func (tr *transportRequest) extraHeaders() Header {
- if tr.extra == nil {
- tr.extra = make(Header)
- }
- return tr.extra
-}
-
-// RoundTrip implements the RoundTripper interface.
-func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) {
- if req.URL == nil {
- return nil, errors.New("http: nil Request.URL")
- }
- if req.Header == nil {
- return nil, errors.New("http: nil Request.Header")
- }
- if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
- t.altLk.RLock()
- var rt RoundTripper
- if t.altProto != nil {
- rt = t.altProto[req.URL.Scheme]
- }
- t.altLk.RUnlock()
- if rt == nil {
- return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme}
- }
- return rt.RoundTrip(req)
- }
- if req.URL.Host == "" {
- return nil, errors.New("http: no Host in request URL")
- }
- treq := &transportRequest{Request: req}
- cm, err := t.connectMethodForRequest(treq)
- if err != nil {
- return nil, err
- }
-
- // Get the cached or newly-created connection to either the
- // host (for http or https), the http proxy, or the http proxy
- // pre-CONNECTed to https server. In any case, we'll be ready
- // to send it requests.
- pconn, err := t.getConn(cm)
- if err != nil {
- return nil, err
- }
-
- return pconn.roundTrip(treq)
-}
-
-// RegisterProtocol registers a new protocol with scheme.
-// The Transport will pass requests using the given scheme to rt.
-// It is rt's responsibility to simulate HTTP request semantics.
-//
-// RegisterProtocol can be used by other packages to provide
-// implementations of protocol schemes like "ftp" or "file".
-func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
- if scheme == "http" || scheme == "https" {
- panic("protocol " + scheme + " already registered")
- }
- t.altLk.Lock()
- defer t.altLk.Unlock()
- if t.altProto == nil {
- t.altProto = make(map[string]RoundTripper)
- }
- if _, exists := t.altProto[scheme]; exists {
- panic("protocol " + scheme + " already registered")
- }
- t.altProto[scheme] = rt
-}
-
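
A sketch of registering an extra scheme. NewFileTransport, defined in filetransport.go of this same package, is one RoundTripper that fits here; the request path is a placeholder:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        t := &http.Transport{}
        // Serve file:// URLs from the local filesystem root.
        t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
        c := &http.Client{Transport: t}

        res, err := c.Get("file:///etc/hostname") // placeholder path
        if err != nil {
            fmt.Println("fetch failed:", err)
            return
        }
        defer res.Body.Close()
        b, _ := ioutil.ReadAll(res.Body)
        fmt.Printf("%s", b)
    }
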
-// CloseIdleConnections closes any connections which were previously
-// connected from previous requests but are now sitting idle in
-// a "keep-alive" state. It does not interrupt any connections currently
-// in use.
-func (t *Transport) CloseIdleConnections() {
- t.idleLk.Lock()
- m := t.idleConn
- t.idleConn = nil
- t.idleLk.Unlock()
- if m == nil {
- return
- }
- for _, conns := range m {
- for _, pconn := range conns {
- pconn.close()
- }
- }
-}
-
-//
-// Private implementation past this point.
-//
-
-func getenvEitherCase(k string) string {
- if v := os.Getenv(strings.ToUpper(k)); v != "" {
- return v
- }
- return os.Getenv(strings.ToLower(k))
-}
-
-func (t *Transport) connectMethodForRequest(treq *transportRequest) (*connectMethod, error) {
- cm := &connectMethod{
- targetScheme: treq.URL.Scheme,
- targetAddr: canonicalAddr(treq.URL),
- }
- if t.Proxy != nil {
- var err error
- cm.proxyURL, err = t.Proxy(treq.Request)
- if err != nil {
- return nil, err
- }
- }
- return cm, nil
-}
-
-// proxyAuth returns the Proxy-Authorization header to set
-// on requests, if applicable.
-func (cm *connectMethod) proxyAuth() string {
- if cm.proxyURL == nil {
- return ""
- }
- if u := cm.proxyURL.User; u != nil {
- return "Basic " + base64.URLEncoding.EncodeToString([]byte(u.String()))
- }
- return ""
-}
-
-// putIdleConn adds pconn to the list of idle persistent connections awaiting
-// a new request.
-// If pconn is no longer needed or not in a good state, putIdleConn
-// returns false.
-func (t *Transport) putIdleConn(pconn *persistConn) bool {
- if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
- pconn.close()
- return false
- }
- if pconn.isBroken() {
- return false
- }
- key := pconn.cacheKey
- max := t.MaxIdleConnsPerHost
- if max == 0 {
- max = DefaultMaxIdleConnsPerHost
- }
- t.idleLk.Lock()
- if t.idleConn == nil {
- t.idleConn = make(map[string][]*persistConn)
- }
- if len(t.idleConn[key]) >= max {
- t.idleLk.Unlock()
- pconn.close()
- return false
- }
- for _, exist := range t.idleConn[key] {
- if exist == pconn {
- log.Fatalf("dup idle pconn %p in freelist", pconn)
- }
- }
- t.idleConn[key] = append(t.idleConn[key], pconn)
- t.idleLk.Unlock()
- return true
-}
-
-func (t *Transport) getIdleConn(cm *connectMethod) (pconn *persistConn) {
- key := cm.String()
- t.idleLk.Lock()
- defer t.idleLk.Unlock()
- if t.idleConn == nil {
- return nil
- }
- for {
- pconns, ok := t.idleConn[key]
- if !ok {
- return nil
- }
- if len(pconns) == 1 {
- pconn = pconns[0]
- delete(t.idleConn, key)
- } else {
- // 2 or more cached connections; pop last
- // TODO: queue?
- pconn = pconns[len(pconns)-1]
- t.idleConn[key] = pconns[0 : len(pconns)-1]
- }
- if !pconn.isBroken() {
- return
- }
- }
- panic("unreachable")
-}
-
-func (t *Transport) dial(network, addr string) (c net.Conn, err error) {
- if t.Dial != nil {
- return t.Dial(network, addr)
- }
- return net.Dial(network, addr)
-}
-
-// getConn dials and creates a new persistConn to the target as
-// specified in the connectMethod. This includes doing a proxy CONNECT
-// and/or setting up TLS. If this doesn't return an error, the persistConn
-// is ready to write requests to.
-func (t *Transport) getConn(cm *connectMethod) (*persistConn, error) {
- if pc := t.getIdleConn(cm); pc != nil {
- return pc, nil
- }
-
- conn, err := t.dial("tcp", cm.addr())
- if err != nil {
- if cm.proxyURL != nil {
- err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err)
- }
- return nil, err
- }
-
- pa := cm.proxyAuth()
-
- pconn := &persistConn{
- t: t,
- cacheKey: cm.String(),
- conn: conn,
- reqch: make(chan requestAndChan, 50),
- writech: make(chan writeRequest, 50),
- closech: make(chan struct{}),
- }
-
- switch {
- case cm.proxyURL == nil:
- // Do nothing.
- case cm.targetScheme == "http":
- pconn.isProxy = true
- if pa != "" {
- pconn.mutateHeaderFunc = func(h Header) {
- h.Set("Proxy-Authorization", pa)
- }
- }
- case cm.targetScheme == "https":
- connectReq := &Request{
- Method: "CONNECT",
- URL: &url.URL{Opaque: cm.targetAddr},
- Host: cm.targetAddr,
- Header: make(Header),
- }
- if pa != "" {
- connectReq.Header.Set("Proxy-Authorization", pa)
- }
- connectReq.Write(conn)
-
- // Read response.
- // Okay to use and discard buffered reader here, because
- // TLS server will not speak until spoken to.
- br := bufio.NewReader(conn)
- resp, err := ReadResponse(br, connectReq)
- if err != nil {
- conn.Close()
- return nil, err
- }
- if resp.StatusCode != 200 {
- f := strings.SplitN(resp.Status, " ", 2)
- conn.Close()
- return nil, errors.New(f[1])
- }
- }
-
- if cm.targetScheme == "https" {
- // Initiate TLS and check remote host name against certificate.
- cfg := t.TLSClientConfig
- if cfg == nil || cfg.ServerName == "" {
- host := cm.tlsHost()
- if cfg == nil {
- cfg = &tls.Config{ServerName: host}
- } else {
- clone := *cfg // shallow clone
- clone.ServerName = host
- cfg = &clone
- }
- }
- conn = tls.Client(conn, cfg)
- if err = conn.(*tls.Conn).Handshake(); err != nil {
- return nil, err
- }
- if t.TLSClientConfig == nil || !t.TLSClientConfig.InsecureSkipVerify {
- if err = conn.(*tls.Conn).VerifyHostname(cm.tlsHost()); err != nil {
- return nil, err
- }
- }
- pconn.conn = conn
- }
-
- pconn.br = bufio.NewReader(pconn.conn)
- pconn.bw = bufio.NewWriter(pconn.conn)
- go pconn.readLoop()
- go pconn.writeLoop()
- return pconn, nil
-}
-
-// useProxy returns true if requests to addr should use a proxy,
-// according to the NO_PROXY or no_proxy environment variable.
-// addr is always a canonicalAddr with a host and port.
-func useProxy(addr string) bool {
- if len(addr) == 0 {
- return true
- }
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return false
- }
- if host == "localhost" {
- return false
- }
- if ip := net.ParseIP(host); ip != nil {
- if ip.IsLoopback() {
- return false
- }
- }
-
- no_proxy := getenvEitherCase("NO_PROXY")
- if no_proxy == "*" {
- return false
- }
-
- addr = strings.ToLower(strings.TrimSpace(addr))
- if hasPort(addr) {
- addr = addr[:strings.LastIndex(addr, ":")]
- }
-
- for _, p := range strings.Split(no_proxy, ",") {
- p = strings.ToLower(strings.TrimSpace(p))
- if len(p) == 0 {
- continue
- }
- if hasPort(p) {
- p = p[:strings.LastIndex(p, ":")]
- }
- if addr == p {
- return false
- }
- if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) {
- // no_proxy ".foo.com" matches "bar.foo.com" or "foo.com"
- return false
- }
- if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' {
- // no_proxy "foo.com" matches "bar.foo.com"
- return false
- }
- }
- return true
-}
-
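
useProxy is unexported, so the NO_PROXY matching rules above are easiest to illustrate with a standalone helper that mirrors just the per-entry suffix logic (matchesNoProxy is illustrative only, not part of the package):

    package main

    import (
        "fmt"
        "strings"
    )

    // matchesNoProxy mirrors the per-entry check in useProxy: a leading
    // dot matches the bare domain and any subdomain, while a bare domain
    // matches itself and any subdomain, but only on a label boundary.
    func matchesNoProxy(host, entry string) bool {
        host = strings.ToLower(host)
        entry = strings.ToLower(strings.TrimSpace(entry))
        if entry == "" {
            return false
        }
        if host == entry {
            return true
        }
        if entry[0] == '.' && (strings.HasSuffix(host, entry) || host == entry[1:]) {
            return true
        }
        if entry[0] != '.' && strings.HasSuffix(host, entry) && host[len(host)-len(entry)-1] == '.' {
            return true
        }
        return false
    }

    func main() {
        fmt.Println(matchesNoProxy("bar.foo.com", ".foo.com")) // true
        fmt.Println(matchesNoProxy("bar.foo.com", "foo.com"))  // true
        fmt.Println(matchesNoProxy("badfoo.com", "foo.com"))   // false
    }
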
-// connectMethod is the map key (in its String form) for keeping persistent
-// TCP connections alive for subsequent HTTP requests.
-//
-// A connect method may be of the following types:
-//
-// Cache key form Description
-// ----------------- -------------------------
-// ||http|foo.com http directly to server, no proxy
-// ||https|foo.com https directly to server, no proxy
-// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
-// http://proxy.com|http http to proxy, http to anywhere after that
-//
-// Note: no support for https to the proxy yet.
-//
-type connectMethod struct {
- proxyURL *url.URL // nil for no proxy, else full proxy URL
- targetScheme string // "http" or "https"
- targetAddr string // Not used if proxy + http targetScheme (4th example in table)
-}
-
-func (ck *connectMethod) String() string {
- proxyStr := ""
- targetAddr := ck.targetAddr
- if ck.proxyURL != nil {
- proxyStr = ck.proxyURL.String()
- if ck.targetScheme == "http" {
- targetAddr = ""
- }
- }
- return strings.Join([]string{proxyStr, ck.targetScheme, targetAddr}, "|")
-}
-
-// addr returns the first hop "host:port" to which we need to TCP connect.
-func (cm *connectMethod) addr() string {
- if cm.proxyURL != nil {
- return canonicalAddr(cm.proxyURL)
- }
- return cm.targetAddr
-}
-
-// tlsHost returns the host name to match against the peer's
-// TLS certificate.
-func (cm *connectMethod) tlsHost() string {
- h := cm.targetAddr
- if hasPort(h) {
- h = h[:strings.LastIndex(h, ":")]
- }
- return h
-}
-
-// persistConn wraps a connection, usually a persistent one
-// (but may be used for non-keep-alive requests as well)
-type persistConn struct {
- t *Transport
- cacheKey string // its connectMethod.String()
- conn net.Conn
- closed bool // whether conn has been closed
- br *bufio.Reader // from conn
- bw *bufio.Writer // to conn
- reqch chan requestAndChan // written by roundTrip; read by readLoop
- writech chan writeRequest // written by roundTrip; read by writeLoop
- closech chan struct{} // broadcast close when readLoop (TCP connection) closes
- isProxy bool
-
- // mutateHeaderFunc is an optional func to modify extra
- // headers on each outbound request before it's written. (the
- // original Request given to RoundTrip is not modified)
- mutateHeaderFunc func(Header)
-
- lk sync.Mutex // guards numExpectedResponses and broken
- numExpectedResponses int
- broken bool // an error has happened on this connection; marked broken so it's not reused.
-}
-
-func (pc *persistConn) isBroken() bool {
- pc.lk.Lock()
- b := pc.broken
- pc.lk.Unlock()
- return b
-}
-
-var remoteSideClosedFunc func(error) bool // or nil to use default
-
-func remoteSideClosed(err error) bool {
- if err == io.EOF {
- return true
- }
- if remoteSideClosedFunc != nil {
- return remoteSideClosedFunc(err)
- }
- return false
-}
-
-func (pc *persistConn) readLoop() {
- defer close(pc.closech)
- alive := true
- var lastbody io.ReadCloser // last response body, if any, read on this connection
-
- for alive {
- pb, err := pc.br.Peek(1)
-
- pc.lk.Lock()
- if pc.numExpectedResponses == 0 {
- pc.closeLocked()
- pc.lk.Unlock()
- if len(pb) > 0 {
- log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v",
- string(pb), err)
- }
- return
- }
- pc.lk.Unlock()
-
- rc := <-pc.reqch
-
- // Advance past the previous response's body, if the
- // caller hasn't done so.
- if lastbody != nil {
- lastbody.Close() // assumed idempotent
- lastbody = nil
- }
-
- var resp *Response
- if err == nil {
- resp, err = ReadResponse(pc.br, rc.req)
- }
-
- if err != nil {
- pc.close()
- } else {
- hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0
- if rc.addedGzip && hasBody && resp.Header.Get("Content-Encoding") == "gzip" {
- resp.Header.Del("Content-Encoding")
- resp.Header.Del("Content-Length")
- resp.ContentLength = -1
- gzReader, zerr := gzip.NewReader(resp.Body)
- if zerr != nil {
- pc.close()
- err = zerr
- } else {
- resp.Body = &readFirstCloseBoth{&discardOnCloseReadCloser{gzReader}, resp.Body}
- }
- }
- resp.Body = &bodyEOFSignal{body: resp.Body}
- }
-
- if err != nil || resp.Close || rc.req.Close {
- alive = false
- }
-
- hasBody := resp != nil && rc.req.Method != "HEAD" && resp.ContentLength != 0
- var waitForBodyRead chan bool
- if hasBody {
- lastbody = resp.Body
- waitForBodyRead = make(chan bool, 1)
- resp.Body.(*bodyEOFSignal).fn = func(err error) {
- alive1 := alive
- if err != nil {
- alive1 = false
- }
- if alive1 && !pc.t.putIdleConn(pc) {
- alive1 = false
- }
- if !alive1 || pc.isBroken() {
- pc.close()
- }
- waitForBodyRead <- alive1
- }
- }
-
- if alive && !hasBody {
- // When there's no response body, we immediately
- // reuse the TCP connection (putIdleConn), but
- // we need to prevent ClientConn.Read from
- // closing the Response.Body on the next
- // loop, otherwise it might close the body
- // before the client code has had a chance to
- // read it (even though it'll just be 0, EOF).
- lastbody = nil
-
- if !pc.t.putIdleConn(pc) {
- alive = false
- }
- }
-
- rc.ch <- responseAndError{resp, err}
-
- // Wait for the just-returned response body to be fully consumed
- // before we race and peek on the underlying bufio reader.
- if waitForBodyRead != nil {
- alive = <-waitForBodyRead
- }
-
- if !alive {
- pc.close()
- }
- }
-}
-
-func (pc *persistConn) writeLoop() {
- for {
- select {
- case wr := <-pc.writech:
- if pc.isBroken() {
- wr.ch <- errors.New("http: can't write HTTP request on broken connection")
- continue
- }
- err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra)
- if err == nil {
- err = pc.bw.Flush()
- }
- if err != nil {
- pc.markBroken()
- }
- wr.ch <- err
- case <-pc.closech:
- return
- }
- }
-}
-
-type responseAndError struct {
- res *Response
- err error
-}
-
-type requestAndChan struct {
- req *Request
- ch chan responseAndError
-
- // did the Transport (as opposed to the client code) add an
- // Accept-Encoding gzip header? only if it we set it do
- // Accept-Encoding gzip header? only if we set it do
- addedGzip bool
-}
-
-// A writeRequest is sent by the readLoop's goroutine to the
-// writeLoop's goroutine to write a request while the read loop
-// concurrently waits on both the write response and the server's
-// reply.
-type writeRequest struct {
- req *transportRequest
- ch chan<- error
-}
-
-func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
- if pc.mutateHeaderFunc != nil {
- pc.mutateHeaderFunc(req.extraHeaders())
- }
-
- // Ask for a compressed version if the caller didn't set their
- // own value for Accept-Encoding. We only attempted to
- // own value for Accept-Encoding. We only attempt to
- // requested it.
- requestedGzip := false
- if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
- requestedGzip = true
- req.extraHeaders().Set("Accept-Encoding", "gzip")
- }
-
- pc.lk.Lock()
- pc.numExpectedResponses++
- pc.lk.Unlock()
-
- // Write the request concurrently with waiting for a response,
- // in case the server decides to reply before reading our full
- // request body.
- writeErrCh := make(chan error, 1)
- pc.writech <- writeRequest{req, writeErrCh}
-
- resc := make(chan responseAndError, 1)
- pc.reqch <- requestAndChan{req.Request, resc, requestedGzip}
-
- var re responseAndError
- var pconnDeadCh = pc.closech
- var failTicker <-chan time.Time
-WaitResponse:
- for {
- select {
- case err := <-writeErrCh:
- if err != nil {
- re = responseAndError{nil, err}
- pc.close()
- break WaitResponse
- }
- case <-pconnDeadCh:
- // The persist connection is dead. This shouldn't
- // usually happen (only with Connection: close responses
- // with no response bodies), but if it does happen it
- // means either a) the remote server hung up on us
- // prematurely, or b) the readLoop sent us a response &
- // closed its closech at roughly the same time, and we
- // selected this case first, in which case a response
- // might still be coming soon.
- //
-   // We can't avoid the select race in b) by using an unbuffered
- // resc channel instead, because then goroutines can
- // leak if we exit due to other errors.
- pconnDeadCh = nil // avoid spinning
- failTicker = time.After(100 * time.Millisecond) // arbitrary time to wait for resc
- case <-failTicker:
- re = responseAndError{nil, errors.New("net/http: transport closed before response was received")}
- break WaitResponse
- case re = <-resc:
- break WaitResponse
- }
- }
-
- pc.lk.Lock()
- pc.numExpectedResponses--
- pc.lk.Unlock()
-
- return re.res, re.err
-}
-
-// markBroken marks a connection as broken (so it's not reused).
-// It differs from close in that it doesn't close the underlying
-// connection for use when it's still being read.
-func (pc *persistConn) markBroken() {
- pc.lk.Lock()
- defer pc.lk.Unlock()
- pc.broken = true
-}
-
-func (pc *persistConn) close() {
- pc.lk.Lock()
- defer pc.lk.Unlock()
- pc.closeLocked()
-}
-
-func (pc *persistConn) closeLocked() {
- pc.broken = true
- if !pc.closed {
- pc.conn.Close()
- pc.closed = true
- }
- pc.mutateHeaderFunc = nil
-}
-
-var portMap = map[string]string{
- "http": "80",
- "https": "443",
-}
-
-// canonicalAddr returns url.Host but always with a ":port" suffix
-func canonicalAddr(url *url.URL) string {
- addr := url.Host
- if !hasPort(addr) {
- return addr + ":" + portMap[url.Scheme]
- }
- return addr
-}
-
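
canonicalAddr is likewise unexported; a tiny mirror of its default-port rule (illustrative only):

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // canonical mirrors canonicalAddr above: add the scheme's default
    // port when the URL's host does not already carry one.
    func canonical(u *url.URL) string {
        ports := map[string]string{"http": "80", "https": "443"}
        if strings.LastIndex(u.Host, ":") > strings.LastIndex(u.Host, "]") {
            return u.Host // already has a port (same test as hasPort)
        }
        return u.Host + ":" + ports[u.Scheme]
    }

    func main() {
        u, _ := url.Parse("https://example.com/path")
        fmt.Println(canonical(u)) // example.com:443
        u2, _ := url.Parse("http://example.com:8080/")
        fmt.Println(canonical(u2)) // example.com:8080
    }
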
-// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most
-// once, right before its final (error-producing) Read or Close call
-// returns.
-type bodyEOFSignal struct {
- body io.ReadCloser
- mu sync.Mutex // guards closed, rerr and fn
- closed bool // whether Close has been called
- rerr error // sticky Read error
- fn func(error) // error will be nil on Read io.EOF
-}
-
-func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
- es.mu.Lock()
- closed, rerr := es.closed, es.rerr
- es.mu.Unlock()
- if closed {
- return 0, errors.New("http: read on closed response body")
- }
- if rerr != nil {
- return 0, rerr
- }
-
- n, err = es.body.Read(p)
- if err != nil {
- es.mu.Lock()
- defer es.mu.Unlock()
- if es.rerr == nil {
- es.rerr = err
- }
- es.condfn(err)
- }
- return
-}
-
-func (es *bodyEOFSignal) Close() error {
- es.mu.Lock()
- defer es.mu.Unlock()
- if es.closed {
- return nil
- }
- es.closed = true
- err := es.body.Close()
- es.condfn(err)
- return err
-}
-
-// caller must hold es.mu.
-func (es *bodyEOFSignal) condfn(err error) {
- if es.fn == nil {
- return
- }
- if err == io.EOF {
- err = nil
- }
- es.fn(err)
- es.fn = nil
-}
-
-type readFirstCloseBoth struct {
- io.ReadCloser
- io.Closer
-}
-
-func (r *readFirstCloseBoth) Close() error {
- if err := r.ReadCloser.Close(); err != nil {
- r.Closer.Close()
- return err
- }
- if err := r.Closer.Close(); err != nil {
- return err
- }
- return nil
-}
-
-// discardOnCloseReadCloser consumes all its input on Close.
-type discardOnCloseReadCloser struct {
- io.ReadCloser
-}
-
-func (d *discardOnCloseReadCloser) Close() error {
- io.Copy(ioutil.Discard, d.ReadCloser) // ignore errors; likely invalid or already closed
- return d.ReadCloser.Close()
-}
diff --git a/gcc-4.8.1/libgo/go/net/http/transport_test.go b/gcc-4.8.1/libgo/go/net/http/transport_test.go
deleted file mode 100644
index daaecae34..000000000
--- a/gcc-4.8.1/libgo/go/net/http/transport_test.go
+++ /dev/null
@@ -1,1214 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for transport.go
-
-package http_test
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/rand"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- . "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-)
-
-// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close
-// and then verify that the final 2 responses get errors back.
-
-// hostPortHandler writes back the client's "host:port".
-var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.FormValue("close") == "true" {
- w.Header().Set("Connection", "close")
- }
- w.Write([]byte(r.RemoteAddr))
-})
-
-// testCloseConn is a net.Conn tracked by a testConnSet.
-type testCloseConn struct {
- net.Conn
- set *testConnSet
-}
-
-func (c *testCloseConn) Close() error {
- c.set.remove(c)
- return c.Conn.Close()
-}
-
-// testConnSet tracks a set of TCP connections and whether they've
-// been closed.
-type testConnSet struct {
- t *testing.T
- closed map[net.Conn]bool
- list []net.Conn // in order created
- mutex sync.Mutex
-}
-
-func (tcs *testConnSet) insert(c net.Conn) {
- tcs.mutex.Lock()
- defer tcs.mutex.Unlock()
- tcs.closed[c] = false
- tcs.list = append(tcs.list, c)
-}
-
-func (tcs *testConnSet) remove(c net.Conn) {
- tcs.mutex.Lock()
- defer tcs.mutex.Unlock()
- tcs.closed[c] = true
-}
-
-// Some tests use this to manage raw TCP connections for later inspection.
-func makeTestDial(t *testing.T) (*testConnSet, func(n, addr string) (net.Conn, error)) {
- connSet := &testConnSet{
- t: t,
- closed: make(map[net.Conn]bool),
- }
- dial := func(n, addr string) (net.Conn, error) {
- c, err := net.Dial(n, addr)
- if err != nil {
- return nil, err
- }
- tc := &testCloseConn{c, connSet}
- connSet.insert(tc)
- return tc, nil
- }
- return connSet, dial
-}
-
-func (tcs *testConnSet) check(t *testing.T) {
- tcs.mutex.Lock()
- defer tcs.mutex.Unlock()
-
- for i, c := range tcs.list {
- if !tcs.closed[c] {
- t.Errorf("TCP connection #%d, %p (of %d total) was not closed", i+1, c, len(tcs.list))
- }
- }
-}
-
-// Make two subsequent requests and verify their responses are the same.
-// The response from the server is our own IP:port.
-func TestTransportKeepAlives(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- for _, disableKeepAlive := range []bool{false, true} {
- tr := &Transport{DisableKeepAlives: disableKeepAlive}
- c := &Client{Transport: tr}
-
- fetch := func(n int) string {
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err)
- }
- return string(body)
- }
-
- body1 := fetch(1)
- body2 := fetch(2)
-
- bodiesDiffer := body1 != body2
- if bodiesDiffer != disableKeepAlive {
- t.Errorf("error in disableKeepAlive=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
- disableKeepAlive, bodiesDiffer, body1, body2)
- }
- }
-}
-
-func TestTransportConnectionCloseOnResponse(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- connSet, testDial := makeTestDial(t)
-
- for _, connectionClose := range []bool{false, true} {
- tr := &Transport{
- Dial: testDial,
- }
- c := &Client{Transport: tr}
-
- fetch := func(n int) string {
- req := new(Request)
- var err error
- req.URL, err = url.Parse(ts.URL + fmt.Sprintf("/?close=%v", connectionClose))
- if err != nil {
- t.Fatalf("URL parse error: %v", err)
- }
- req.Method = "GET"
- req.Proto = "HTTP/1.1"
- req.ProtoMajor = 1
- req.ProtoMinor = 1
-
- res, err := c.Do(req)
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
- }
- defer res.Body.Close()
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
- }
- return string(body)
- }
-
- body1 := fetch(1)
- body2 := fetch(2)
- bodiesDiffer := body1 != body2
- if bodiesDiffer != connectionClose {
- t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
- connectionClose, bodiesDiffer, body1, body2)
- }
-
- tr.CloseIdleConnections()
- }
-
- connSet.check(t)
-}
-
-func TestTransportConnectionCloseOnRequest(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- connSet, testDial := makeTestDial(t)
-
- for _, connectionClose := range []bool{false, true} {
- tr := &Transport{
- Dial: testDial,
- }
- c := &Client{Transport: tr}
-
- fetch := func(n int) string {
- req := new(Request)
- var err error
- req.URL, err = url.Parse(ts.URL)
- if err != nil {
- t.Fatalf("URL parse error: %v", err)
- }
- req.Method = "GET"
- req.Proto = "HTTP/1.1"
- req.ProtoMajor = 1
- req.ProtoMinor = 1
- req.Close = connectionClose
-
- res, err := c.Do(req)
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
- }
- return string(body)
- }
-
- body1 := fetch(1)
- body2 := fetch(2)
- bodiesDiffer := body1 != body2
- if bodiesDiffer != connectionClose {
- t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
- connectionClose, bodiesDiffer, body1, body2)
- }
-
- tr.CloseIdleConnections()
- }
-
- connSet.check(t)
-}
-
-func TestTransportIdleCacheKeys(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
-
- if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
- t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
- }
-
- resp, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err) // stop here; resp is nil and would panic below
- }
- ioutil.ReadAll(resp.Body)
-
- keys := tr.IdleConnKeysForTesting()
- if e, g := 1, len(keys); e != g {
- t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g)
- }
-
- if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e {
- t.Errorf("Expected idle cache key %q; got %q", e, keys[0])
- }
-
- tr.CloseIdleConnections()
- if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
- t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
- }
-}
-
-func TestTransportMaxPerHostIdleConns(t *testing.T) {
- resch := make(chan string)
- gotReq := make(chan bool)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- gotReq <- true
- msg := <-resch
- _, err := w.Write([]byte(msg))
- if err != nil {
- t.Fatalf("Write: %v", err)
- }
- }))
- defer ts.Close()
- maxIdleConns := 2
- tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns}
- c := &Client{Transport: tr}
-
- // Start 3 outstanding requests and wait for the server to get them.
- // Their responses will hang until we write to resch, though.
- donech := make(chan bool)
- doReq := func() {
- resp, err := c.Get(ts.URL)
- if err != nil {
- t.Error(err)
- }
- _, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatalf("ReadAll: %v", err)
- }
- donech <- true
- }
- go doReq()
- <-gotReq
- go doReq()
- <-gotReq
- go doReq()
- <-gotReq
-
- if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
- t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g)
- }
-
- resch <- "res1"
- <-donech
- keys := tr.IdleConnKeysForTesting()
- if e, g := 1, len(keys); e != g {
- t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g)
- }
- cacheKey := "|http|" + ts.Listener.Addr().String()
- if keys[0] != cacheKey {
- t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0])
- }
- if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after first response, expected %d idle conns; got %d", e, g)
- }
-
- resch <- "res2"
- <-donech
- if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after second response, expected %d idle conns; got %d", e, g)
- }
-
- resch <- "res3"
- <-donech
- if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after third response, still expected %d idle conns; got %d", e, g)
- }
-}
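The test above drives MaxIdleConnsPerHost through unexported testing hooks; in ordinary client code the same limit is configured directly on the Transport. A minimal sketch, in which the example URL and the package main wrapper are illustrative:

package main

import (
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Keep at most two idle keep-alive connections per host, mirroring the
	// maxIdleConns value used in the test above.
	tr := &http.Transport{MaxIdleConnsPerHost: 2}
	c := &http.Client{Transport: tr}

	res, err := c.Get("http://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	ioutil.ReadAll(res.Body) // drain the body so the connection can be reused
	res.Body.Close()

	tr.CloseIdleConnections() // drop the idle pool when the client is done
}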
-
-func TestTransportServerClosingUnexpectedly(t *testing.T) {
- ts := httptest.NewServer(hostPortHandler)
- defer ts.Close()
-
- tr := &Transport{}
- c := &Client{Transport: tr}
-
- fetch := func(n, retries int) string {
- condFatalf := func(format string, arg ...interface{}) {
- if retries <= 0 {
- t.Fatalf(format, arg...)
- }
- t.Logf("retrying shortly after expected error: "+format, arg...)
- time.Sleep(time.Second / time.Duration(retries))
- }
- for retries >= 0 {
- retries--
- res, err := c.Get(ts.URL)
- if err != nil {
- condFatalf("error in req #%d, GET: %v", n, err)
- continue
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- condFatalf("error in req #%d, ReadAll: %v", n, err)
- continue
- }
- res.Body.Close()
- return string(body)
- }
- panic("unreachable")
- }
-
- body1 := fetch(1, 0)
- body2 := fetch(2, 0)
-
- ts.CloseClientConnections() // surprise!
-
- // This test has an expected race. Sleeping for 25 ms prevents
- // it on most fast machines, causing the next fetch() call to
- // succeed quickly. But if we do get errors, fetch() will retry 5
- // times with some delays between.
- time.Sleep(25 * time.Millisecond)
-
- body3 := fetch(3, 5)
-
- if body1 != body2 {
- t.Errorf("expected body1 and body2 to be equal")
- }
- if body2 == body3 {
- t.Errorf("expected body2 and body3 to be different")
- }
-}
-
-// Test for http://golang.org/issue/2616: a stress test of surprise server closes.
-// With the bug present, this fails pretty reliably with GOMAXPROCS=100 or some other high value.
-func TestStressSurpriseServerCloses(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping test in short mode")
- }
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Length", "5")
- w.Header().Set("Content-Type", "text/plain")
- w.Write([]byte("Hello"))
- w.(Flusher).Flush()
- conn, buf, _ := w.(Hijacker).Hijack()
- buf.Flush()
- conn.Close()
- }))
- defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
-
- // Do a bunch of traffic from different goroutines. Send to activityc
- // after each request completes, regardless of whether it failed.
- const (
- numClients = 50
- reqsPerClient = 250
- )
- activityc := make(chan bool)
- for i := 0; i < numClients; i++ {
- go func() {
- for i := 0; i < reqsPerClient; i++ {
- res, err := c.Get(ts.URL)
- if err == nil {
- // We expect errors since the server is
- // hanging up on us after telling us to
- // send more requests, so we don't
- // actually care what the error is.
- // But we want to close the body in cases
- // where we won the race.
- res.Body.Close()
- }
- activityc <- true
- }
- }()
- }
-
- // Make sure all the requests come back, one way or another.
- for i := 0; i < numClients*reqsPerClient; i++ {
- select {
- case <-activityc:
- case <-time.After(5 * time.Second):
- t.Fatalf("presumed deadlock; no HTTP client activity seen in awhile")
- }
- }
-}
-
-// TestTransportHeadResponses verifies that responses to HEAD requests, which
-// carry a Content-Length but no body, are handled properly.
-func TestTransportHeadResponses(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.Method != "HEAD" {
- panic("expected HEAD; got " + r.Method)
- }
- w.Header().Set("Content-Length", "123")
- w.WriteHeader(200)
- }))
- defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
- for i := 0; i < 2; i++ {
- res, err := c.Head(ts.URL)
- if err != nil {
- t.Errorf("error on loop %d: %v", i, err)
- }
- if e, g := "123", res.Header.Get("Content-Length"); e != g {
- t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g)
- }
- if e, g := int64(123), res.ContentLength; e != g {
- t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g)
- }
- }
-}
-
-// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding
-// on responses to HEAD requests.
-func TestTransportHeadChunkedResponse(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.Method != "HEAD" {
- panic("expected HEAD; got " + r.Method)
- }
- w.Header().Set("Transfer-Encoding", "chunked") // client should ignore
- w.Header().Set("x-client-ipport", r.RemoteAddr)
- w.WriteHeader(200)
- }))
- defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
-
- res1, err := c.Head(ts.URL)
- if err != nil {
- t.Fatalf("request 1 error: %v", err)
- }
- res2, err := c.Head(ts.URL)
- if err != nil {
- t.Fatalf("request 2 error: %v", err)
- }
- if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 {
- t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2)
- }
-}
-
-var roundTripTests = []struct {
- accept string
- expectAccept string
- compressed bool
-}{
- // Requests with no accept-encoding header use transparent compression
- {"", "gzip", false},
- // Requests with other accept-encoding should pass through unmodified
- {"foo", "foo", false},
- // Requests with accept-encoding == gzip should be passed through
- {"gzip", "gzip", true},
-}
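As the table above encodes, the Transport only decompresses transparently when it added the Accept-Encoding header itself; a caller that sets the header explicitly receives the raw gzip body and must decode it. A minimal sketch of that manual path, assuming the server actually honors the header (the URL and the fetchGzipped helper name are illustrative):

package main

import (
	"compress/gzip"
	"io/ioutil"
	"log"
	"net/http"
)

// fetchGzipped is a hypothetical helper: because it sets Accept-Encoding
// itself, the Transport hands back the compressed body untouched and the
// caller is responsible for decoding it.
func fetchGzipped(url string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept-Encoding", "gzip")
	res, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	gz, err := gzip.NewReader(res.Body) // assumes the server really sent gzip
	if err != nil {
		return nil, err
	}
	return ioutil.ReadAll(gz)
}

func main() {
	body, err := fetchGzipped("http://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(body))
}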
-
-// Test that modifications made to the Request by the RoundTripper are cleaned up.
-func TestRoundTripGzip(t *testing.T) {
- const responseBody = "test response body"
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
- accept := req.Header.Get("Accept-Encoding")
- if expect := req.FormValue("expect_accept"); accept != expect {
- t.Errorf("in handler, test %v: Accept-Encoding = %q, want %q",
- req.FormValue("testnum"), accept, expect)
- }
- if accept == "gzip" {
- rw.Header().Set("Content-Encoding", "gzip")
- gz := gzip.NewWriter(rw)
- gz.Write([]byte(responseBody))
- gz.Close()
- } else {
- rw.Header().Set("Content-Encoding", accept)
- rw.Write([]byte(responseBody))
- }
- }))
- defer ts.Close()
-
- for i, test := range roundTripTests {
- // Build the request; Accept-Encoding is only set explicitly for the cases that ask for it.
- req, _ := NewRequest("GET", fmt.Sprintf("%s/?testnum=%d&expect_accept=%s", ts.URL, i, test.expectAccept), nil)
- if test.accept != "" {
- req.Header.Set("Accept-Encoding", test.accept)
- }
- res, err := DefaultTransport.RoundTrip(req)
- if err != nil {
- t.Errorf("%d. RoundTrip: %v", i, err)
- continue
- }
- var body []byte
- if test.compressed {
- gzip, err := gzip.NewReader(res.Body)
- if err != nil {
- t.Errorf("%d. gzip NewReader: %v", i, err)
- continue
- }
- body, err = ioutil.ReadAll(gzip)
- res.Body.Close()
- } else {
- body, err = ioutil.ReadAll(res.Body)
- }
- if err != nil {
- t.Errorf("%d. Error: %q", i, err)
- continue
- }
- if g, e := string(body), responseBody; g != e {
- t.Errorf("%d. body = %q; want %q", i, g, e)
- }
- if g, e := req.Header.Get("Accept-Encoding"), test.accept; g != e {
- t.Errorf("%d. Accept-Encoding = %q; want %q (it was mutated, in violation of RoundTrip contract)", i, g, e)
- }
- if g, e := res.Header.Get("Content-Encoding"), test.accept; g != e {
- t.Errorf("%d. Content-Encoding = %q; want %q", i, g, e)
- }
- }
-
-}
-
-func TestTransportGzip(t *testing.T) {
- const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- const nRandBytes = 1024 * 1024
- ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
- if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e {
- t.Errorf("Accept-Encoding = %q, want %q", g, e)
- }
- rw.Header().Set("Content-Encoding", "gzip")
- if req.Method == "HEAD" {
- return
- }
-
- var w io.Writer = rw
- var buf bytes.Buffer
- if req.FormValue("chunked") == "0" {
- w = &buf
- defer io.Copy(rw, &buf)
- defer func() {
- rw.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
- }()
- }
- gz := gzip.NewWriter(w)
- gz.Write([]byte(testString))
- if req.FormValue("body") == "large" {
- io.CopyN(gz, rand.Reader, nRandBytes)
- }
- gz.Close()
- }))
- defer ts.Close()
-
- for _, chunked := range []string{"1", "0"} {
- c := &Client{Transport: &Transport{}}
-
- // First fetch something large, but only read some of it.
- res, err := c.Get(ts.URL + "/?body=large&chunked=" + chunked)
- if err != nil {
- t.Fatalf("large get: %v", err)
- }
- buf := make([]byte, len(testString))
- n, err := io.ReadFull(res.Body, buf)
- if err != nil {
- t.Fatalf("partial read of large response: size=%d, %v", n, err)
- }
- if e, g := testString, string(buf); e != g {
- t.Errorf("partial read got %q, expected %q", g, e)
- }
- res.Body.Close()
- // Read on the body, even though it's closed
- n, err = res.Body.Read(buf)
- if n != 0 || err == nil {
- t.Errorf("expected error post-closed large Read; got = %d, %v", n, err)
- }
-
- // Then something small.
- res, err = c.Get(ts.URL + "/?chunked=" + chunked)
- if err != nil {
- t.Fatal(err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if g, e := string(body), testString; g != e {
- t.Fatalf("body = %q; want %q", g, e)
- }
- if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
- t.Fatalf("Content-Encoding = %q; want %q", g, e)
- }
-
- // Read on the body after it's been fully read:
- n, err = res.Body.Read(buf)
- if n != 0 || err == nil {
- t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err)
- }
- res.Body.Close()
- n, err = res.Body.Read(buf)
- if n != 0 || err == nil {
- t.Errorf("expected Read error after Close; got %d, %v", n, err)
- }
- }
-
- // And a HEAD request too, because they're always weird.
- c := &Client{Transport: &Transport{}}
- res, err := c.Head(ts.URL)
- if err != nil {
- t.Fatalf("Head: %v", err)
- }
- if res.StatusCode != 200 {
- t.Errorf("Head status=%d; want=200", res.StatusCode)
- }
-}
-
-func TestTransportProxy(t *testing.T) {
- ch := make(chan string, 1)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ch <- "real server"
- }))
- defer ts.Close()
- proxy := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- ch <- "proxy for " + r.URL.String()
- }))
- defer proxy.Close()
-
- pu, err := url.Parse(proxy.URL)
- if err != nil {
- t.Fatal(err)
- }
- c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}}
- c.Head(ts.URL)
- got := <-ch
- want := "proxy for " + ts.URL + "/"
- if got != want {
- t.Errorf("want %q, got %q", want, got)
- }
-}
-
-// TestTransportGzipRecursive sends a gzip quine and checks that the
-// client gets the same value back. This is more cute than anything,
-// but checks that we don't recurse forever, and checks that
-// Content-Encoding is removed.
-func TestTransportGzipRecursive(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Encoding", "gzip")
- w.Write(rgz)
- }))
- defer ts.Close()
-
- c := &Client{Transport: &Transport{}}
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(body, rgz) {
- t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x",
- body, rgz)
- }
- if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
- t.Fatalf("Content-Encoding = %q; want %q", g, e)
- }
-}
-
-// TestTransportPersistConnLeak tests that the per-connection goroutines shut down when no longer needed.
-func TestTransportPersistConnLeak(t *testing.T) {
- gotReqCh := make(chan bool)
- unblockCh := make(chan bool)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- gotReqCh <- true
- <-unblockCh
- w.Header().Set("Content-Length", "0")
- w.WriteHeader(204)
- }))
- defer ts.Close()
-
- tr := &Transport{}
- c := &Client{Transport: tr}
-
- n0 := runtime.NumGoroutine()
-
- const numReq = 25
- didReqCh := make(chan bool)
- for i := 0; i < numReq; i++ {
- go func() {
- res, err := c.Get(ts.URL)
- didReqCh <- true
- if err != nil {
- t.Errorf("client fetch error: %v", err)
- return
- }
- res.Body.Close()
- }()
- }
-
- // Wait for all goroutines to be stuck in the Handler.
- for i := 0; i < numReq; i++ {
- <-gotReqCh
- }
-
- nhigh := runtime.NumGoroutine()
-
- // Tell all handlers to unblock and reply.
- for i := 0; i < numReq; i++ {
- unblockCh <- true
- }
-
- // Wait for all HTTP clients to be done.
- for i := 0; i < numReq; i++ {
- <-didReqCh
- }
-
- tr.CloseIdleConnections()
- time.Sleep(100 * time.Millisecond)
- runtime.GC()
- runtime.GC() // even more.
- nfinal := runtime.NumGoroutine()
-
- growth := nfinal - n0
-
- // We expect 0 or 1 extra goroutine, empirically. Allow up to 5.
- // Previously we were leaking one per numReq.
- t.Logf("goroutine growth: %d -> %d -> %d (delta: %d)", n0, nhigh, nfinal, growth)
- if int(growth) > 5 {
- t.Error("too many new goroutines")
- }
-}
-
-// golang.org/issue/4531: Transport leaks goroutines when
-// request.ContentLength is explicitly short
-func TestTransportPersistConnLeakShortBody(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- }))
- defer ts.Close()
-
- tr := &Transport{}
- c := &Client{Transport: tr}
-
- n0 := runtime.NumGoroutine()
- body := []byte("Hello")
- for i := 0; i < 20; i++ {
- req, err := NewRequest("POST", ts.URL, bytes.NewReader(body))
- if err != nil {
- t.Fatal(err)
- }
- req.ContentLength = int64(len(body) - 2) // explicitly short
- _, err = c.Do(req)
- if err == nil {
- t.Fatal("Expect an error from writing too long of a body.")
- }
- }
- nhigh := runtime.NumGoroutine()
- tr.CloseIdleConnections()
- time.Sleep(50 * time.Millisecond)
- runtime.GC()
- nfinal := runtime.NumGoroutine()
-
- growth := nfinal - n0
-
- // We expect 0 or 1 extra goroutine, empirically. Allow up to 5.
- // Previously we were leaking one per request.
- t.Logf("goroutine growth: %d -> %d -> %d (delta: %d)", n0, nhigh, nfinal, growth)
- if int(growth) > 5 {
- t.Error("too many new goroutines")
- }
-}
-
-// This used to crash; http://golang.org/issue/3266
-func TestTransportIdleConnCrash(t *testing.T) {
- tr := &Transport{}
- c := &Client{Transport: tr}
-
- unblockCh := make(chan bool, 1)
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- <-unblockCh
- tr.CloseIdleConnections()
- }))
- defer ts.Close()
-
- didreq := make(chan bool)
- go func() {
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Error(err)
- } else {
- res.Body.Close() // returns idle conn
- }
- didreq <- true
- }()
- unblockCh <- true
- <-didreq
-}
-
-// Test that the transport doesn't close the TCP connection early,
-// before the response body has been read. This was a regression
-// which sadly lacked a triggering test. The large response body made
-// the old race easier to trigger.
-func TestIssue3644(t *testing.T) {
- const numFoos = 5000
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header().Set("Connection", "close")
- for i := 0; i < numFoos; i++ {
- w.Write([]byte("foo "))
- }
- }))
- defer ts.Close()
- tr := &Transport{}
- c := &Client{Transport: tr}
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- bs, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if len(bs) != numFoos*len("foo ") {
- t.Errorf("unexpected response length")
- }
-}
-
-// Test that a client receives a server's reply, even if the server doesn't read
-// the entire request body.
-func TestIssue3595(t *testing.T) {
- const deniedMsg = "sorry, denied."
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- Error(w, deniedMsg, StatusUnauthorized)
- }))
- defer ts.Close()
- tr := &Transport{}
- c := &Client{Transport: tr}
- res, err := c.Post(ts.URL, "application/octet-stream", neverEnding('a'))
- if err != nil {
- t.Errorf("Post: %v", err)
- return
- }
- got, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("Body ReadAll: %v", err)
- }
- if !strings.Contains(string(got), deniedMsg) {
- t.Errorf("Known bug: response %q does not contain %q", got, deniedMsg)
- }
-}
-
-// From http://golang.org/issue/4454:
-// "client fails to handle requests with no body and chunked encoding".
-func TestChunkedNoContent(t *testing.T) {
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.WriteHeader(StatusNoContent)
- }))
- defer ts.Close()
-
- for _, closeBody := range []bool{true, false} {
- c := &Client{Transport: &Transport{}}
- const n = 4
- for i := 1; i <= n; i++ {
- res, err := c.Get(ts.URL)
- if err != nil {
- t.Errorf("closingBody=%v, req %d/%d: %v", closeBody, i, n, err)
- } else {
- if closeBody {
- res.Body.Close()
- }
- }
- }
- }
-}
-
-func TestTransportConcurrency(t *testing.T) {
- const maxProcs = 16
- const numReqs = 500
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- fmt.Fprintf(w, "%v", r.FormValue("echo"))
- }))
- defer ts.Close()
- tr := &Transport{}
- c := &Client{Transport: tr}
- reqs := make(chan string)
- defer close(reqs)
-
- var wg sync.WaitGroup
- wg.Add(numReqs)
- for i := 0; i < maxProcs*2; i++ {
- go func() {
- for req := range reqs {
- res, err := c.Get(ts.URL + "/?echo=" + req)
- if err != nil {
- t.Errorf("error on req %s: %v", req, err)
- wg.Done()
- continue
- }
- all, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Errorf("read error on req %s: %v", req, err)
- wg.Done()
- continue
- }
- if string(all) != req {
- t.Errorf("body of req %s = %q; want %q", req, all, req)
- }
- wg.Done()
- res.Body.Close()
- }
- }()
- }
- for i := 0; i < numReqs; i++ {
- reqs <- fmt.Sprintf("request-%d", i)
- }
- wg.Wait()
-}
-
-func TestIssue4191_InfiniteGetTimeout(t *testing.T) {
- const debug = false
- mux := NewServeMux()
- mux.HandleFunc("/get", func(w ResponseWriter, r *Request) {
- io.Copy(w, neverEnding('a'))
- })
- ts := httptest.NewServer(mux)
-
- client := &Client{
- Transport: &Transport{
- Dial: func(n, addr string) (net.Conn, error) {
- conn, err := net.Dial(n, addr)
- if err != nil {
- return nil, err
- }
- conn.SetDeadline(time.Now().Add(100 * time.Millisecond))
- if debug {
- conn = NewLoggingConn("client", conn)
- }
- return conn, nil
- },
- DisableKeepAlives: true,
- },
- }
-
- nRuns := 5
- if testing.Short() {
- nRuns = 1
- }
- for i := 0; i < nRuns; i++ {
- if debug {
- println("run", i+1, "of", nRuns)
- }
- sres, err := client.Get(ts.URL + "/get")
- if err != nil {
- t.Errorf("Error issuing GET: %v", err)
- break
- }
- _, err = io.Copy(ioutil.Discard, sres.Body)
- if err == nil {
- t.Errorf("Unexpected successful copy")
- break
- }
- }
- if debug {
- println("tests complete; waiting for handlers to finish")
- }
- ts.Close()
-}
-
-func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) {
- const debug = false
- mux := NewServeMux()
- mux.HandleFunc("/get", func(w ResponseWriter, r *Request) {
- io.Copy(w, neverEnding('a'))
- })
- mux.HandleFunc("/put", func(w ResponseWriter, r *Request) {
- defer r.Body.Close()
- io.Copy(ioutil.Discard, r.Body)
- })
- ts := httptest.NewServer(mux)
-
- client := &Client{
- Transport: &Transport{
- Dial: func(n, addr string) (net.Conn, error) {
- conn, err := net.Dial(n, addr)
- if err != nil {
- return nil, err
- }
- conn.SetDeadline(time.Now().Add(100 * time.Millisecond))
- if debug {
- conn = NewLoggingConn("client", conn)
- }
- return conn, nil
- },
- DisableKeepAlives: true,
- },
- }
-
- nRuns := 5
- if testing.Short() {
- nRuns = 1
- }
- for i := 0; i < nRuns; i++ {
- if debug {
- println("run", i+1, "of", nRuns)
- }
- sres, err := client.Get(ts.URL + "/get")
- if err != nil {
- t.Errorf("Error issuing GET: %v", err)
- break
- }
- req, _ := NewRequest("PUT", ts.URL+"/put", sres.Body)
- _, err = client.Do(req)
- if err == nil {
- sres.Body.Close()
- t.Errorf("Unexpected successful PUT")
- break
- }
- sres.Body.Close()
- }
- if debug {
- println("tests complete; waiting for handlers to finish")
- }
- ts.Close()
-}
-
-type fooProto struct{}
-
-func (fooProto) RoundTrip(req *Request) (*Response, error) {
- res := &Response{
- Status: "200 OK",
- StatusCode: 200,
- Header: make(Header),
- Body: ioutil.NopCloser(strings.NewReader("You wanted " + req.URL.String())),
- }
- return res, nil
-}
-
-func TestTransportAltProto(t *testing.T) {
- tr := &Transport{}
- c := &Client{Transport: tr}
- tr.RegisterProtocol("foo", fooProto{})
- res, err := c.Get("foo://bar.com/path")
- if err != nil {
- t.Fatal(err)
- }
- bodyb, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- body := string(bodyb)
- if e := "You wanted foo://bar.com/path"; body != e {
- t.Errorf("got response %q, want %q", body, e)
- }
-}
-
-func TestTransportNoHost(t *testing.T) {
- tr := &Transport{}
- _, err := tr.RoundTrip(&Request{
- Header: make(Header),
- URL: &url.URL{
- Scheme: "http",
- },
- })
- want := "http: no Host in request URL"
- if got := fmt.Sprint(err); got != want {
- t.Errorf("error = %v; want %q", err, want)
- }
-}
-
-type proxyFromEnvTest struct {
- req string // URL to fetch; blank means "http://example.com"
- env string
- noenv string
- want string
- wanterr error
-}
-
-func (t proxyFromEnvTest) String() string {
- var buf bytes.Buffer
- if t.env != "" {
- fmt.Fprintf(&buf, "http_proxy=%q", t.env)
- }
- if t.noenv != "" {
- fmt.Fprintf(&buf, " no_proxy=%q", t.noenv)
- }
- req := "http://example.com"
- if t.req != "" {
- req = t.req
- }
- fmt.Fprintf(&buf, " req=%q", req)
- return strings.TrimSpace(buf.String())
-}
-
-var proxyFromEnvTests = []proxyFromEnvTest{
- {env: "127.0.0.1:8080", want: "http://127.0.0.1:8080"},
- {env: "cache.corp.example.com:1234", want: "http://cache.corp.example.com:1234"},
- {env: "cache.corp.example.com", want: "http://cache.corp.example.com"},
- {env: "https://cache.corp.example.com", want: "https://cache.corp.example.com"},
- {env: "http://127.0.0.1:8080", want: "http://127.0.0.1:8080"},
- {env: "https://127.0.0.1:8080", want: "https://127.0.0.1:8080"},
- {want: "<nil>"},
- {noenv: "example.com", req: "http://example.com/", env: "proxy", want: "<nil>"},
- {noenv: ".example.com", req: "http://example.com/", env: "proxy", want: "<nil>"},
- {noenv: "ample.com", req: "http://example.com/", env: "proxy", want: "http://proxy"},
- {noenv: "example.com", req: "http://foo.example.com/", env: "proxy", want: "<nil>"},
- {noenv: ".foo.com", req: "http://example.com/", env: "proxy", want: "http://proxy"},
-}
-
-func TestProxyFromEnvironment(t *testing.T) {
- os.Setenv("HTTP_PROXY", "")
- os.Setenv("http_proxy", "")
- os.Setenv("NO_PROXY", "")
- os.Setenv("no_proxy", "")
- for _, tt := range proxyFromEnvTests {
- os.Setenv("HTTP_PROXY", tt.env)
- os.Setenv("NO_PROXY", tt.noenv)
- reqURL := tt.req
- if reqURL == "" {
- reqURL = "http://example.com"
- }
- req, _ := NewRequest("GET", reqURL, nil)
- url, err := ProxyFromEnvironment(req)
- if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.wanterr); g != e {
- t.Errorf("%v: got error = %q, want %q", tt, g, e)
- continue
- }
- if got := fmt.Sprintf("%s", url); got != tt.want {
- t.Errorf("%v: got URL = %q, want %q", tt, url, tt.want)
- }
- }
-}
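The table-driven test above calls ProxyFromEnvironment directly; in ordinary client code the same function is normally installed as the Transport's Proxy callback so that every request consults HTTP_PROXY and NO_PROXY. A minimal sketch (the example URL is illustrative):

package main

import (
	"log"
	"net/http"
)

func main() {
	tr := &http.Transport{
		// Consult HTTP_PROXY / NO_PROXY for every request.
		Proxy: http.ProxyFromEnvironment,
	}
	c := &http.Client{Transport: tr}

	res, err := c.Get("http://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	res.Body.Close()
}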
-
-// rgz is a gzip quine that uncompresses to itself.
-var rgz = []byte{
- 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
- 0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0,
- 0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2,
- 0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17,
- 0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60,
- 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2,
- 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00,
- 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00,
- 0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16,
- 0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05,
- 0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff,
- 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00,
- 0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00,
- 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
- 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88,
- 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff,
- 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00,
- 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00,
- 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff,
- 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00,
- 0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16,
- 0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08,
- 0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa,
- 0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00,
- 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
- 0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
- 0x00, 0x00,
-}
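For reference, the quine property of rgz can be checked without any HTTP machinery; a minimal sketch, assuming the rgz byte slice above is pasted into the program:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"log"
)

// var rgz = []byte{ ... } // the byte slice from the test file above

func main() {
	zr, err := gzip.NewReader(bytes.NewReader(rgz))
	if err != nil {
		log.Fatal(err)
	}
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(out, rgz)) // prints true: the stream decompresses to itself
}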
diff --git a/gcc-4.8.1/libgo/go/net/http/triv.go b/gcc-4.8.1/libgo/go/net/http/triv.go
deleted file mode 100644
index 232d65089..000000000
--- a/gcc-4.8.1/libgo/go/net/http/triv.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "expvar"
- "flag"
- "fmt"
- "io"
- "log"
- "net/http"
- "os"
- "os/exec"
- "strconv"
- "sync"
-)
-
-// hello world, the web server
-var helloRequests = expvar.NewInt("hello-requests")
-
-func HelloServer(w http.ResponseWriter, req *http.Request) {
- helloRequests.Add(1)
- io.WriteString(w, "hello, world!\n")
-}
-
-// Simple counter server. POSTing to it will set the value.
-type Counter struct {
- mu sync.Mutex // protects n
- n int
-}
-
-// This makes Counter satisfy the expvar.Var interface, so we can export
-// it directly.
-func (ctr *Counter) String() string {
- ctr.mu.Lock()
- defer ctr.mu.Unlock()
- return fmt.Sprintf("%d", ctr.n)
-}
-
-func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- ctr.mu.Lock()
- defer ctr.mu.Unlock()
- switch req.Method {
- case "GET":
- ctr.n++
- case "POST":
- buf := new(bytes.Buffer)
- io.Copy(buf, req.Body)
- body := buf.String()
- if n, err := strconv.Atoi(body); err != nil {
- fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body)
- } else {
- ctr.n = n
- fmt.Fprint(w, "counter reset\n")
- }
- }
- fmt.Fprintf(w, "counter = %d\n", ctr.n)
-}
-
-// simple flag server
-var booleanflag = flag.Bool("boolean", true, "another flag for testing")
-
-func FlagServer(w http.ResponseWriter, req *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- fmt.Fprint(w, "Flags:\n")
- flag.VisitAll(func(f *flag.Flag) {
- if f.Value.String() != f.DefValue {
- fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue)
- } else {
- fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String())
- }
- })
-}
-
-// simple argument server
-func ArgServer(w http.ResponseWriter, req *http.Request) {
- for _, s := range os.Args {
- fmt.Fprint(w, s, " ")
- }
-}
-
-// a channel (just for the fun of it)
-type Chan chan int
-
-func ChanCreate() Chan {
- c := make(Chan)
- go func(c Chan) {
- for x := 0; ; x++ {
- c <- x
- }
- }(c)
- return c
-}
-
-func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch))
-}
-
-// exec a program, redirecting output
-func DateServer(rw http.ResponseWriter, req *http.Request) {
- rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
-
- date, err := exec.Command("/bin/date").Output()
- if err != nil {
- http.Error(rw, err.Error(), 500)
- return
- }
- rw.Write(date)
-}
-
-func Logger(w http.ResponseWriter, req *http.Request) {
- log.Print(req.URL)
- http.Error(w, "oops", 404)
-}
-
-var webroot = flag.String("root", os.Getenv("HOME"), "web root directory")
-
-func main() {
- flag.Parse()
-
- // The counter is published as a variable directly.
- ctr := new(Counter)
- expvar.Publish("counter", ctr)
- http.Handle("/counter", ctr)
- http.Handle("/", http.HandlerFunc(Logger))
- http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot))))
- http.Handle("/chan", ChanCreate())
- http.HandleFunc("/flags", FlagServer)
- http.HandleFunc("/args", ArgServer)
- http.HandleFunc("/go/hello", HelloServer)
- http.HandleFunc("/date", DateServer)
- err := http.ListenAndServe(":12345", nil)
- if err != nil {
- log.Panicln("ListenAndServe:", err)
- }
-}
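Because of the "+build ignore" tag, triv.go is excluded from the net/http package build and is meant to be run on its own, after which the handlers registered in main listen on port 12345. A minimal sketch of exercising the counter endpoint from another process, assuming the server above is running locally:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
)

func main() {
	// A GET bumps the counter (see Counter.ServeHTTP above).
	res, err := http.Get("http://localhost:12345/counter")
	if err != nil {
		log.Fatal(err)
	}
	body, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Print(string(body)) // e.g. "counter = 1"

	// A POST with a numeric body resets it.
	res, err = http.Post("http://localhost:12345/counter", "text/plain", strings.NewReader("42"))
	if err != nil {
		log.Fatal(err)
	}
	body, _ = ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Print(string(body)) // "counter reset" followed by "counter = 42"
}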