| package main |
| |
| var src = ` |
| // Code generated by golang.org/x/tools/cmd/bundle command: |
| // $ bundle net/http http |
| |
| // Package http provides HTTP client and server implementations. |
| // |
| // Get, Head, Post, and PostForm make HTTP (or HTTPS) requests: |
| // |
| // resp, err := http.Get("http://example.com/") |
| // ... |
| // resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) |
| // ... |
| // resp, err := http.PostForm("http://example.com/form", |
| // url.Values{"key": {"Value"}, "id": {"123"}}) |
| // |
| // The client must close the response body when finished with it: |
| // |
| // resp, err := http.Get("http://example.com/") |
| // if err != nil { |
| // // handle error |
| // } |
| // defer resp.Body.Close() |
| // body, err := io.ReadAll(resp.Body) |
| // // ... |
| // |
| // For control over HTTP client headers, redirect policy, and other |
| // settings, create a Client: |
| // |
| // client := &http.Client{ |
| // CheckRedirect: redirectPolicyFunc, |
| // } |
| // |
| // resp, err := client.Get("http://example.com") |
| // // ... |
| // |
| // req, err := http.NewRequest("GET", "http://example.com", nil) |
| // // ... |
| // req.Header.Add("If-None-Match", ` + "`" + `W/"wyzzy"` + "`" + `) |
| // resp, err := client.Do(req) |
| // // ... |
| // |
| // For control over proxies, TLS configuration, keep-alives, |
| // compression, and other settings, create a Transport: |
| // |
| // tr := &http.Transport{ |
| // TLSClientConfig: &tls.Config{RootCAs: pool}, |
| // DisableCompression: true, |
| // } |
| // client := &http.Client{Transport: tr} |
| // resp, err := client.Get("https://example.com") |
| // |
| // Clients and Transports are safe for concurrent use by multiple |
| // goroutines and for efficiency should only be created once and re-used. |
| // |
| // ListenAndServe starts an HTTP server with a given address and handler. |
| // The handler is usually nil, which means to use DefaultServeMux. |
| // Handle and HandleFunc add handlers to DefaultServeMux: |
| // |
| // http.Handle("/foo", fooHandler) |
| // |
| // http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { |
| // fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) |
| // }) |
| // |
| // log.Fatal(http.ListenAndServe(":8080", nil)) |
| // |
| // More control over the server's behavior is available by creating a |
| // custom Server: |
| // |
| // s := &http.Server{ |
| // Addr: ":8080", |
| // Handler: myHandler, |
| // ReadTimeout: 10 * time.Second, |
| // WriteTimeout: 10 * time.Second, |
| // MaxHeaderBytes: 1 << 20, |
| // } |
| // log.Fatal(s.ListenAndServe()) |
| // |
| package http |
| |
| import ( |
| "bufio" |
| "bytes" |
| "compress/gzip" |
| "crypto/tls" |
| "encoding/base64" |
| "encoding/binary" |
| "errors" |
| "fmt" |
| "io" |
| "log" |
| "mime" |
| "mime/multipart" |
| "net" |
| "net/http/internal" |
| "net/textproto" |
| "net/url" |
| "os" |
| "path" |
| "path/filepath" |
| "runtime" |
| "sort" |
| "strconv" |
| "strings" |
| "sync" |
| "sync/atomic" |
| "time" |
| "unicode/utf8" |
| ) |
| |
// A Client is an HTTP client. Its zero value (DefaultClient) is a
// usable client that uses DefaultTransport.
//
// The Client's Transport typically has internal state (cached TCP
// connections), so Clients should be reused instead of created as
// needed. Clients are safe for concurrent use by multiple goroutines.
//
// A Client is higher-level than a RoundTripper (such as Transport)
// and additionally handles HTTP details such as cookies and
// redirects.
type Client struct {
	// Transport specifies the mechanism by which individual
	// HTTP requests are made.
	// If nil, DefaultTransport is used.
	Transport RoundTripper

	// CheckRedirect specifies the policy for handling redirects.
	// If CheckRedirect is not nil, the client calls it before
	// following an HTTP redirect. The arguments req and via are
	// the upcoming request and the requests made already, oldest
	// first. If CheckRedirect returns an error, the Client's Get
	// method returns both the previous Response and
	// CheckRedirect's error (wrapped in a url.Error) instead of
	// issuing the Request req.
	//
	// If CheckRedirect is nil, the Client uses its default policy
	// (defaultCheckRedirect), which is to stop after 10 consecutive
	// requests.
	CheckRedirect func(req *Request, via []*Request) error

	// Jar specifies the cookie jar.
	// If Jar is nil, cookies are not sent in requests and ignored
	// in responses.
	Jar CookieJar

	// Timeout specifies a time limit for requests made by this
	// Client. The timeout includes connection time, any
	// redirects, and reading the response body. The timer remains
	// running after Get, Head, Post, or Do return and will
	// interrupt reading of the Response.Body.
	//
	// A Timeout of zero means no timeout.
	//
	// The Client's Transport must support the CancelRequest
	// method or Client will return errors when attempting to make
	// a request with Get, Head, Post, or Do. Client's default
	// Transport (DefaultTransport) supports CancelRequest.
	Timeout time.Duration
}
| |
// DefaultClient is the default Client and is used by the package-level
// Get, Head, Post, and PostForm helpers.
var DefaultClient = &Client{}
| |
// RoundTripper is an interface representing the ability to execute a
// single HTTP transaction, obtaining the Response for a given Request.
// (Transport is the usual implementation; NewFileTransport returns one
// that serves from a FileSystem.)
//
// A RoundTripper must be safe for concurrent use by multiple
// goroutines.
type RoundTripper interface {
	// RoundTrip executes a single HTTP transaction, returning
	// the Response for the request req. RoundTrip should not
	// attempt to interpret the response. In particular,
	// RoundTrip must return err == nil if it obtained a response,
	// regardless of the response's HTTP status code. A non-nil
	// err should be reserved for failure to obtain a response.
	// Similarly, RoundTrip should not attempt to handle
	// higher-level protocol details such as redirects,
	// authentication, or cookies.
	//
	// RoundTrip should not modify the request, except for
	// consuming and closing the Body, including on errors. The
	// request's URL and Header fields are guaranteed to be
	// initialized.
	RoundTrip(*Request) (*Response, error)
}
| |
// hasPort reports whether s — of the form "host", "host:port", or
// "[ipv6::address]:port" — includes a port. A colon inside an IPv6
// bracket pair does not count: only a colon after the last ']' does.
func hasPort(s string) bool {
	lastColon := strings.LastIndex(s, ":")
	lastBracket := strings.LastIndex(s, "]")
	return lastColon > lastBracket
}
| |
| // refererForURL returns a referer without any authentication info or |
| // an empty string if lastReq scheme is https and newReq scheme is http. |
| func refererForURL(lastReq, newReq *url.URL) string { |
| |
| if lastReq.Scheme == "https" && newReq.Scheme == "http" { |
| return "" |
| } |
| referer := lastReq.String() |
| if lastReq.User != nil { |
| |
| auth := lastReq.User.String() + "@" |
| referer = strings.Replace(referer, auth, "", 1) |
| } |
| return referer |
| } |
| |
// readClose is used in Send to implement io.ReadCloser by bundling
// together the bufio.Reader through which we read the response, and
// the underlying network connection.
type readClose struct {
	io.Reader
	io.Closer
}
| |
// send applies the client's cookie jar to req, sends the request via
// the client's transport, and records any cookies the response sets.
func (c *Client) send(req *Request) (*Response, error) {
	// Attach jar cookies that match the request URL before sending.
	if c.Jar != nil {
		for _, cookie := range c.Jar.Cookies(req.URL) {
			req.AddCookie(cookie)
		}
	}
	resp, err := send(req, c.transport())
	if err != nil {
		return nil, err
	}
	// Feed any Set-Cookie values from the response back into the jar.
	if c.Jar != nil {
		if rc := resp.Cookies(); len(rc) > 0 {
			c.Jar.SetCookies(req.URL, rc)
		}
	}
	return resp, err
}
| |
| // Do sends an HTTP request and returns an HTTP response, following |
| // policy (e.g. redirects, cookies, auth) as configured on the client. |
| // |
| // An error is returned if caused by client policy (such as |
| // CheckRedirect), or if there was an HTTP protocol error. |
| // A non-2xx response doesn't cause an error. |
| // |
| // When err is nil, resp always contains a non-nil resp.Body. |
| // |
| // Callers should close resp.Body when done reading from it. If |
| // resp.Body is not closed, the Client's underlying RoundTripper |
| // (typically Transport) may not be able to re-use a persistent TCP |
| // connection to the server for a subsequent "keep-alive" request. |
| // |
| // The request Body, if non-nil, will be closed by the underlying |
| // Transport, even on errors. |
| // |
| // Generally Get, Post, or PostForm will be used instead of Do. |
| func (c *Client) Do(req *Request) (resp *Response, err error) { |
| if req.Method == "GET" || req.Method == "HEAD" { |
| return c.doFollowingRedirects(req, shouldRedirectGet) |
| } |
| if req.Method == "POST" || req.Method == "PUT" { |
| return c.doFollowingRedirects(req, shouldRedirectPost) |
| } |
| return c.send(req) |
| } |
| |
| func (c *Client) transport() RoundTripper { |
| if c.Transport != nil { |
| return c.Transport |
| } |
| return DefaultTransport |
| } |
| |
// send issues an HTTP request via the given RoundTripper after
// validating the request and filling in defaulted fields.
// Caller should close resp.Body when done reading from it.
func send(req *Request, t RoundTripper) (resp *Response, err error) {
	if t == nil {
		req.closeBody()
		return nil, errors.New("http: no Client.Transport or DefaultTransport")
	}

	if req.URL == nil {
		req.closeBody()
		return nil, errors.New("http: nil Request.URL")
	}

	// RequestURI is a server-side field; rejecting it here catches
	// server Requests mistakenly reused as client requests.
	if req.RequestURI != "" {
		req.closeBody()
		return nil, errors.New("http: Request.RequestURI can't be set in client requests.")
	}

	if req.Header == nil {
		req.Header = make(Header)
	}

	// Credentials embedded in the URL become a Basic Authorization
	// header, unless the caller already set one explicitly.
	if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" {
		username := u.Username()
		password, _ := u.Password()
		req.Header.Set("Authorization", "Basic "+basicAuth(username, password))
	}
	resp, err = t.RoundTrip(req)
	if err != nil {
		// A RoundTripper should never return both; prefer the error
		// and drop the response rather than return an ambiguous pair.
		if resp != nil {
			log.Printf("RoundTripper returned a response & error; ignoring response")
		}
		return nil, err
	}
	return resp, nil
}
| |
// basicAuth builds the credential string for HTTP Basic authentication.
// See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt
// "To receive authorization, the client sends the userid and password,
// separated by a single colon (":") character, within a base64
// encoded string in the credentials."
// It is not meant to be urlencoded.
func basicAuth(username, password string) string {
	return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}
| |
| // True if the specified HTTP status code is one for which the Get utility should |
| // automatically redirect. |
| func shouldRedirectGet(statusCode int) bool { |
| switch statusCode { |
| case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect: |
| return true |
| } |
| return false |
| } |
| |
| // True if the specified HTTP status code is one for which the Post utility should |
| // automatically redirect. |
| func shouldRedirectPost(statusCode int) bool { |
| switch statusCode { |
| case StatusFound, StatusSeeOther: |
| return true |
| } |
| return false |
| } |
| |
// Get issues a GET to the specified URL. If the response is one of
// the following redirect codes, Get follows the redirect, up to a
// maximum of 10 redirects:
//
//	301 (Moved Permanently)
//	302 (Found)
//	303 (See Other)
//	307 (Temporary Redirect)
//
// An error is returned if there were too many redirects or if there
// was an HTTP protocol error. A non-2xx response doesn't cause an
// error.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
//
// Get is a wrapper around DefaultClient.Get, so it uses
// DefaultClient's redirect policy and cookie jar settings.
//
// To make a request with custom headers, use NewRequest and
// DefaultClient.Do.
func Get(url string) (resp *Response, err error) {
	return DefaultClient.Get(url)
}
| |
// Get issues a GET to the specified URL. If the response is one of the
// following redirect codes, Get follows the redirect after calling the
// Client's CheckRedirect function:
//
//	301 (Moved Permanently)
//	302 (Found)
//	303 (See Other)
//	307 (Temporary Redirect)
//
// An error is returned if the Client's CheckRedirect function fails
// or if there was an HTTP protocol error. A non-2xx response doesn't
// cause an error.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
//
// To make a request with custom headers, use NewRequest and Client.Do.
func (c *Client) Get(url string) (resp *Response, err error) {
	req, err := NewRequest("GET", url, nil)
	if err != nil {
		// NewRequest failed (e.g. the URL could not be parsed);
		// nothing was sent.
		return nil, err
	}
	return c.doFollowingRedirects(req, shouldRedirectGet)
}
| |
// alwaysFalse is the wasCanceled placeholder used by
// doFollowingRedirects when no Client.Timeout is configured.
func alwaysFalse() bool { return false }
| |
// doFollowingRedirects sends ireq and keeps following redirects (as
// judged by shouldRedirect applied to each response status code) until
// a terminal response arrives, the redirect policy refuses a redirect,
// or the client's Timeout fires. It implements Client.Timeout by
// arranging for the transport's CancelRequest to be called when the
// timer expires.
func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) {
	var base *url.URL
	redirectChecker := c.CheckRedirect
	if redirectChecker == nil {
		redirectChecker = defaultCheckRedirect
	}
	var via []*Request // requests already issued, oldest first

	if ireq.URL == nil {
		ireq.closeBody()
		return nil, errors.New("http: nil Request.URL")
	}

	var reqmu sync.Mutex // guards req
	req := ireq

	var timer *time.Timer
	var atomicWasCanceled int32 // atomic bool (1 or 0)
	var wasCanceled = alwaysFalse
	if c.Timeout > 0 {
		wasCanceled = func() bool { return atomic.LoadInt32(&atomicWasCanceled) != 0 }
		type canceler interface {
			CancelRequest(*Request)
		}
		tr, ok := c.transport().(canceler)
		if !ok {
			return nil, fmt.Errorf("net/http: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", c.transport())
		}
		// When the timer fires, cancel whichever request is currently
		// in flight; reqmu keeps req stable while we do so.
		timer = time.AfterFunc(c.Timeout, func() {
			atomic.StoreInt32(&atomicWasCanceled, 1)
			reqmu.Lock()
			defer reqmu.Unlock()
			tr.CancelRequest(req)
		})
	}

	urlStr := "" // next relevant URL; also used in the url.Error below
	redirectFailed := false
	for redirect := 0; ; redirect++ {
		if redirect != 0 {
			// Build the follow-up request for the redirect target.
			nreq := new(Request)
			nreq.Method = ireq.Method
			if ireq.Method == "POST" || ireq.Method == "PUT" {
				// Redirected POST/PUT requests are reissued as GET.
				nreq.Method = "GET"
			}
			nreq.Header = make(Header)
			nreq.URL, err = base.Parse(urlStr)
			if err != nil {
				break
			}
			if len(via) > 0 {
				// Add the Referer header from the most recent request
				// URL, unless refererForURL suppresses it.
				lastReq := via[len(via)-1]
				if ref := refererForURL(lastReq.URL, nreq.URL); ref != "" {
					nreq.Header.Set("Referer", ref)
				}

				err = redirectChecker(nreq, via)
				if err != nil {
					redirectFailed = true
					break
				}
			}
			reqmu.Lock()
			req = nreq
			reqmu.Unlock()
		}

		urlStr = req.URL.String()
		if resp, err = c.send(req); err != nil {
			if wasCanceled() {
				// Our timeout cancelation likely caused this failure;
				// report it as a timeout error.
				err = &httpError{
					err: err.Error() + " (Client.Timeout exceeded while awaiting headers)",
					timeout: true,
				}
			}
			break
		}

		if shouldRedirect(resp.StatusCode) {
			// Read the body if small so underlying TCP connection will be re-used.
			// No need to check for errors: if it fails, Transport won't reuse it anyway.
			const maxBodySlurpSize = 2 << 10
			if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
				io.CopyN(io.Discard, resp.Body, maxBodySlurpSize)
			}
			resp.Body.Close()
			if urlStr = resp.Header.Get("Location"); urlStr == "" {
				err = fmt.Errorf("%d response missing Location header", resp.StatusCode)
				break
			}
			base = req.URL
			via = append(via, req)
			continue
		}
		// Terminal response: wire the timer into the body so reading
		// the body still honors (and eventually stops) the timeout.
		if timer != nil {
			resp.Body = &cancelTimerBody{
				t: timer,
				rc: resp.Body,
				reqWasCanceled: wasCanceled,
			}
		}
		return resp, nil
	}

	// We broke out of the loop: wrap err in a url.Error whose Op is
	// the title-cased method name ("Get", "Post", ...).
	method := ireq.Method
	urlErr := &url.Error{
		Op: method[0:1] + strings.ToLower(method[1:]),
		URL: urlStr,
		Err: err,
	}

	if redirectFailed {
		// Per the CheckRedirect contract, return both the most recent
		// response and the (wrapped) CheckRedirect error.
		return resp, urlErr
	}

	if resp != nil {
		resp.Body.Close()
	}
	return nil, urlErr
}
| |
| func defaultCheckRedirect(req *Request, via []*Request) error { |
| if len(via) >= 10 { |
| return errors.New("stopped after 10 redirects") |
| } |
| return nil |
| } |
| |
// Post issues a POST to the specified URL with the given body and
// Content-Type.
//
// Caller should close resp.Body when done reading from it.
//
// If the provided body is an io.Closer, it is closed after the
// request.
//
// Post is a wrapper around DefaultClient.Post.
//
// To set custom headers, use NewRequest and DefaultClient.Do.
func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
	return DefaultClient.Post(url, bodyType, body)
}
| |
// Post issues a POST to the specified URL, sending body with the given
// bodyType as the Content-Type header.
//
// Caller should close resp.Body when done reading from it.
//
// If the provided body is an io.Closer, it is closed after the
// request.
//
// To set custom headers, use NewRequest and Client.Do.
func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
	req, err := NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	// The caller-supplied type becomes the request's Content-Type.
	req.Header.Set("Content-Type", bodyType)
	return c.doFollowingRedirects(req, shouldRedirectPost)
}
| |
// PostForm issues a POST to the specified URL, with data's keys and
// values URL-encoded as the request body.
//
// The Content-Type header is set to application/x-www-form-urlencoded.
// To set other headers, use NewRequest and DefaultClient.Do.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
//
// PostForm is a wrapper around DefaultClient.PostForm.
func PostForm(url string, data url.Values) (resp *Response, err error) {
	return DefaultClient.PostForm(url, data)
}
| |
// PostForm issues a POST to the specified URL,
// with data's keys and values URL-encoded as the request body.
//
// The Content-Type header is set to application/x-www-form-urlencoded.
// To set other headers, use NewRequest and Client.Do.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
	return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
| |
// Head issues a HEAD to the specified URL. If the response is one of
// the following redirect codes, Head follows the redirect, up to a
// maximum of 10 redirects:
//
//	301 (Moved Permanently)
//	302 (Found)
//	303 (See Other)
//	307 (Temporary Redirect)
//
// Head is a wrapper around DefaultClient.Head.
func Head(url string) (resp *Response, err error) {
	return DefaultClient.Head(url)
}
| |
// Head issues a HEAD to the specified URL. If the response is one of the
// following redirect codes, Head follows the redirect after calling the
// Client's CheckRedirect function:
//
//	301 (Moved Permanently)
//	302 (Found)
//	303 (See Other)
//	307 (Temporary Redirect)
func (c *Client) Head(url string) (resp *Response, err error) {
	req, err := NewRequest("HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	// HEAD shares Get's redirect rules.
	return c.doFollowingRedirects(req, shouldRedirectGet)
}
| |
// cancelTimerBody is an io.ReadCloser that wraps rc with two features:
// 1) on Read EOF or Close, the timer t is Stopped,
// 2) On Read failure, if reqWasCanceled is true, the error is wrapped and
// marked as net.Error that hit its timeout.
type cancelTimerBody struct {
	t *time.Timer // the Client.Timeout timer from doFollowingRedirects
	rc io.ReadCloser // the original response body
	reqWasCanceled func() bool // reports whether the timeout already fired
}
| |
// Read forwards to the wrapped body, stopping the timeout timer at
// EOF and converting cancelation-induced failures to timeout errors.
func (b *cancelTimerBody) Read(p []byte) (n int, err error) {
	n, err = b.rc.Read(p)
	if err == io.EOF {
		// Body fully consumed: the request can no longer time out.
		b.t.Stop()
	} else if err != nil && b.reqWasCanceled() {
		// The failure came after our timer canceled the request, so
		// surface it as a timeout.
		return n, &httpError{
			err: err.Error() + " (Client.Timeout exceeded while reading body)",
			timeout: true,
		}
	}
	return
}
| |
// Close closes the wrapped body and stops the timeout timer.
func (b *cancelTimerBody) Close() error {
	err := b.rc.Close()
	b.t.Stop()
	return err
}
| |
// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
// HTTP response or the Cookie header of an HTTP request.
//
// See http://tools.ietf.org/html/rfc6265 for details.
type Cookie struct {
	Name string
	Value string

	Path string // optional
	Domain string // optional
	Expires time.Time // optional
	RawExpires string // for reading cookies only

	// MaxAge=0 means no 'Max-Age' attribute specified.
	// MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
	// MaxAge>0 means Max-Age attribute present and given in seconds
	MaxAge int
	Secure bool
	HttpOnly bool
	Raw string // the unprocessed Set-Cookie line, for cookies read from a response
	Unparsed []string // Raw text of unparsed attribute-value pairs
}
| |
// readSetCookies parses all "Set-Cookie" values from
// the header h and returns the successfully parsed Cookies.
// Malformed lines, invalid names, and invalid values are skipped;
// unrecognized or unparseable attributes are kept in Cookie.Unparsed.
func readSetCookies(h Header) []*Cookie {
	cookies := []*Cookie{}
	for _, line := range h["Set-Cookie"] {
		// The first ';'-separated part is "name=value"; the rest are
		// attributes such as Path, Domain, Max-Age, etc.
		parts := strings.Split(strings.TrimSpace(line), ";")
		if len(parts) == 1 && parts[0] == "" {
			continue
		}
		parts[0] = strings.TrimSpace(parts[0])
		j := strings.Index(parts[0], "=")
		if j < 0 {
			// No '=': not a cookie-pair at all; skip the whole line.
			continue
		}
		name, value := parts[0][:j], parts[0][j+1:]
		if !isCookieNameValid(name) {
			continue
		}
		// Values may be double-quoted (allowDoubleQuote=true).
		value, success := parseCookieValue(value, true)
		if !success {
			continue
		}
		c := &Cookie{
			Name: name,
			Value: value,
			Raw: line,
		}
		for i := 1; i < len(parts); i++ {
			parts[i] = strings.TrimSpace(parts[i])
			if len(parts[i]) == 0 {
				continue
			}

			// Attributes may be bare ("Secure") or "attr=value".
			attr, val := parts[i], ""
			if j := strings.Index(attr, "="); j >= 0 {
				attr, val = attr[:j], attr[j+1:]
			}
			lowerAttr := strings.ToLower(attr)
			val, success = parseCookieValue(val, false)
			if !success {
				// Preserve the raw text of unparseable attributes.
				c.Unparsed = append(c.Unparsed, parts[i])
				continue
			}
			switch lowerAttr {
			case "secure":
				c.Secure = true
				continue
			case "httponly":
				c.HttpOnly = true
				continue
			case "domain":
				c.Domain = val
				continue
			case "max-age":
				// Reject non-numeric values and numbers with a
				// leading zero (other than "0" itself).
				secs, err := strconv.Atoi(val)
				if err != nil || secs != 0 && val[0] == '0' {
					break
				}
				if secs <= 0 {
					// Non-positive Max-Age means "delete now".
					c.MaxAge = -1
				} else {
					c.MaxAge = secs
				}
				continue
			case "expires":
				c.RawExpires = val
				// Try RFC 1123 first, then the dash-separated
				// variant some servers send.
				exptime, err := time.Parse(time.RFC1123, val)
				if err != nil {
					exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
					if err != nil {
						c.Expires = time.Time{}
						break
					}
				}
				c.Expires = exptime.UTC()
				continue
			case "path":
				c.Path = val
				continue
			}
			// Unknown attribute: keep its raw text.
			c.Unparsed = append(c.Unparsed, parts[i])
		}
		cookies = append(cookies, c)
	}
	return cookies
}
| |
| // SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. |
| // The provided cookie must have a valid Name. Invalid cookies may be |
| // silently dropped. |
| func SetCookie(w ResponseWriter, cookie *Cookie) { |
| if v := cookie.String(); v != "" { |
| w.Header().Add("Set-Cookie", v) |
| } |
| } |
| |
// String returns the serialization of the cookie for use in a Cookie
// header (if only Name and Value are set) or a Set-Cookie response
// header (if other fields are set).
// If c is nil or c.Name is invalid, the empty string is returned.
func (c *Cookie) String() string {
	if c == nil || !isCookieNameValid(c.Name) {
		return ""
	}
	var b bytes.Buffer
	fmt.Fprintf(&b, "%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value))
	if len(c.Path) > 0 {
		fmt.Fprintf(&b, "; Path=%s", sanitizeCookiePath(c.Path))
	}
	if len(c.Domain) > 0 {
		if validCookieDomain(c.Domain) {
			// A leading dot, if present, is stripped before sending.
			d := c.Domain
			if d[0] == '.' {
				d = d[1:]
			}
			fmt.Fprintf(&b, "; Domain=%s", d)
		} else {
			// An invalid domain is dropped (with a warning) rather
			// than emitting a malformed Set-Cookie header.
			log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute",
				c.Domain)
		}
	}
	// A zero (or pre-1970) Expires time is treated as unset.
	if c.Expires.Unix() > 0 {
		fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(TimeFormat))
	}
	if c.MaxAge > 0 {
		fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
	} else if c.MaxAge < 0 {
		// Negative MaxAge means "delete now", serialized as Max-Age=0.
		fmt.Fprintf(&b, "; Max-Age=0")
	}
	if c.HttpOnly {
		fmt.Fprintf(&b, "; HttpOnly")
	}
	if c.Secure {
		fmt.Fprintf(&b, "; Secure")
	}
	return b.String()
}
| |
| // readCookies parses all "Cookie" values from the header h and |
| // returns the successfully parsed Cookies. |
| // |
| // if filter isn't empty, only cookies of that name are returned |
| func readCookies(h Header, filter string) []*Cookie { |
| cookies := []*Cookie{} |
| lines, ok := h["Cookie"] |
| if !ok { |
| return cookies |
| } |
| |
| for _, line := range lines { |
| parts := strings.Split(strings.TrimSpace(line), ";") |
| if len(parts) == 1 && parts[0] == "" { |
| continue |
| } |
| |
| parsedPairs := 0 |
| for i := 0; i < len(parts); i++ { |
| parts[i] = strings.TrimSpace(parts[i]) |
| if len(parts[i]) == 0 { |
| continue |
| } |
| name, val := parts[i], "" |
| if j := strings.Index(name, "="); j >= 0 { |
| name, val = name[:j], name[j+1:] |
| } |
| if !isCookieNameValid(name) { |
| continue |
| } |
| if filter != "" && filter != name { |
| continue |
| } |
| val, success := parseCookieValue(val, true) |
| if !success { |
| continue |
| } |
| cookies = append(cookies, &Cookie{Name: name, Value: val}) |
| parsedPairs++ |
| } |
| } |
| return cookies |
| } |
| |
| // validCookieDomain returns whether v is a valid cookie domain-value. |
| func validCookieDomain(v string) bool { |
| if isCookieDomainName(v) { |
| return true |
| } |
| if net.ParseIP(v) != nil && !strings.Contains(v, ":") { |
| return true |
| } |
| return false |
| } |
| |
// isCookieDomainName returns whether s is a valid domain name or a valid
// domain name with a leading dot '.'. It is almost a direct copy of
// package net's isDomainName: labels of 1-63 LDH characters separated by
// dots, no leading/trailing hyphen in a label, at most 255 bytes total,
// and at least one letter somewhere in the name.
func isCookieDomainName(s string) bool {
	if len(s) == 0 || len(s) > 255 {
		return false
	}
	// A cookie domain may carry a single leading dot; validate the
	// remainder as a plain domain name.
	s = strings.TrimPrefix(s, ".")

	sawLetter := false
	prev := byte('.')
	labelLen := 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
			sawLetter = true
			labelLen++
		case '0' <= c && c <= '9':
			// Digits are fine but do not count as the required letter.
			labelLen++
		case c == '-':
			// A label may not begin with a hyphen.
			if prev == '.' {
				return false
			}
			labelLen++
		case c == '.':
			// A label may not end with a hyphen, be empty, or exceed
			// 63 bytes.
			if prev == '.' || prev == '-' {
				return false
			}
			if labelLen == 0 || labelLen > 63 {
				return false
			}
			labelLen = 0
		default:
			return false
		}
		prev = c
	}
	if prev == '-' || labelLen > 63 {
		return false
	}
	return sawLetter
}
| |
// cookieNameSanitizer replaces CR and LF — which would allow header
// splitting — with a harmless '-'.
var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")

// sanitizeCookieName returns n with any CR or LF bytes replaced by '-'.
func sanitizeCookieName(n string) string {
	return cookieNameSanitizer.Replace(n)
}
| |
// http://tools.ietf.org/html/rfc6265#section-4.1.1
// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
// ; US-ASCII characters excluding CTLs,
// ; whitespace DQUOTE, comma, semicolon,
// ; and backslash
// We loosen this as spaces and commas are common in cookie values
// but we produce a quoted cookie-value when the value starts or ends
// with a comma or space.
// See https://golang.org/issue/7243 for the discussion.
func sanitizeCookieValue(v string) string {
	v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
	if len(v) == 0 {
		return v
	}
	// Quote the value when it starts or ends with a space or comma so
	// the receiver can round-trip it unambiguously.
	if v[0] == ' ' || v[0] == ',' || v[len(v)-1] == ' ' || v[len(v)-1] == ',' {
		return ` + "`" + `"` + "`" + ` + v + ` + "`" + `"` + "`" + `
	}
	return v
}
| |
// validCookieValueByte reports whether b may appear in a cookie value:
// printable US-ASCII excluding DQUOTE, semicolon, and backslash.
func validCookieValueByte(b byte) bool {
	switch {
	case b < 0x20, b >= 0x7f:
		return false
	case b == '"', b == ';', b == '\\':
		return false
	}
	return true
}
| |
// path-av = "Path=" path-value
// path-value = <any CHAR except CTLs or ";">
//
// sanitizeCookiePath drops (with a warning) any byte of v that is not
// a valid path-value byte.
func sanitizeCookiePath(v string) string {
	return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
}
| |
// validCookiePathByte reports whether b may appear in a cookie Path
// attribute: printable US-ASCII excluding the ';' separator.
func validCookiePathByte(b byte) bool {
	switch {
	case b < 0x20, b >= 0x7f, b == ';':
		return false
	}
	return true
}
| |
// sanitizeOrWarn returns v unchanged when every byte satisfies valid;
// otherwise it logs a warning naming fieldName and returns v with the
// invalid bytes removed.
func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
	for i := 0; i < len(v); i++ {
		if valid(v[i]) {
			continue
		}
		// Found the first bad byte: warn once, then rebuild v keeping
		// only the valid bytes.
		log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName)
		filtered := make([]byte, 0, len(v))
		for j := 0; j < len(v); j++ {
			if b := v[j]; valid(b) {
				filtered = append(filtered, b)
			}
		}
		return string(filtered)
	}
	// Common case: nothing to strip, return the original string.
	return v
}
| |
| func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) { |
| |
| if allowDoubleQuote && len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' { |
| raw = raw[1 : len(raw)-1] |
| } |
| for i := 0; i < len(raw); i++ { |
| if !validCookieValueByte(raw[i]) { |
| return "", false |
| } |
| } |
| return raw, true |
| } |
| |
| func isCookieNameValid(raw string) bool { |
| if raw == "" { |
| return false |
| } |
| return strings.IndexFunc(raw, isNotToken) < 0 |
| } |
| |
// fileTransport implements RoundTripper for the 'file' protocol.
type fileTransport struct {
	fh fileHandler // handler serving files from the wrapped FileSystem
}
| |
// NewFileTransport returns a new RoundTripper, serving the provided
// FileSystem. The returned RoundTripper ignores the URL host in its
// incoming requests, as well as most other properties of the
// request.
//
// The typical use case for NewFileTransport is to register the "file"
// protocol with a Transport, as in:
//
//	t := &http.Transport{}
//	t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
//	c := &http.Client{Transport: t}
//	res, err := c.Get("file:///etc/passwd")
//	...
func NewFileTransport(fs FileSystem) RoundTripper {
	return fileTransport{fileHandler{fs}}
}
| |
// RoundTrip serves req through the file handler. The handler runs in
// its own goroutine so the *Response can be returned as soon as it is
// populated, while the body continues to stream through a pipe.
func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {

	rw, resc := newPopulateResponseWriter()
	go func() {
		t.fh.ServeHTTP(rw, req)
		rw.finish()
	}()
	// Block until the handler commits headers (or finishes).
	return <-resc, nil
}
| |
// newPopulateResponseWriter returns a populateResponse writer and the
// channel on which its populated *Response is delivered. The response
// body is the read side of a pipe fed by the writer.
func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
	pr, pw := io.Pipe()
	rw := &populateResponse{
		ch: make(chan *Response),
		pw: pw,
		res: &Response{
			Proto: "HTTP/1.0",
			ProtoMajor: 1,
			Header: make(Header),
			Close: true,
			Body: pr,
		},
	}
	return rw, rw.ch
}
| |
// populateResponse is a ResponseWriter that populates the *Response
// in res, and writes its body to a pipe connected to the response
// body. Once writes begin or finish() is called, the response is sent
// on ch.
type populateResponse struct {
	res *Response
	ch chan *Response
	wroteHeader bool // WriteHeader has been called
	hasContent bool // at least one body byte was written
	sentResponse bool // res has been sent on ch
	pw *io.PipeWriter // feeds res.Body's pipe
}
| |
// finish completes the in-progress response: a handler that never
// wrote a header is reported as a 500, the Response is sent if it has
// not been already, and the body pipe is closed.
func (pr *populateResponse) finish() {
	if !pr.wroteHeader {
		pr.WriteHeader(500)
	}
	if !pr.sentResponse {
		pr.sendResponse()
	}
	pr.pw.Close()
}
| |
// sendResponse delivers the populated Response on pr.ch, at most once.
func (pr *populateResponse) sendResponse() {
	if pr.sentResponse {
		return
	}
	pr.sentResponse = true

	// Content streams through a pipe, so its length is unknown.
	if pr.hasContent {
		pr.res.ContentLength = -1
	}
	pr.ch <- pr.res
}
| |
// Header returns the header map of the response being built.
func (pr *populateResponse) Header() Header {
	return pr.res.Header
}
| |
// WriteHeader records the status code and status line; only the first
// call has any effect.
func (pr *populateResponse) WriteHeader(code int) {
	if pr.wroteHeader {
		return
	}
	pr.wroteHeader = true

	pr.res.StatusCode = code
	pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
}
| |
// Write streams body bytes into the response pipe. The first write
// implies a 200 status (if WriteHeader was not called first) and causes
// the response headers to be sent before any body bytes go into the pipe.
func (pr *populateResponse) Write(p []byte) (n int, err error) {
	if !pr.wroteHeader {
		pr.WriteHeader(StatusOK)
	}
	pr.hasContent = true
	if !pr.sentResponse {
		pr.sendResponse()
	}
	return pr.pw.Write(p)
}
| |
// A Dir implements FileSystem using the native file system restricted to a
// specific directory tree.
//
// While the FileSystem.Open method takes '/'-separated paths, a Dir's string
// value is a filename on the native file system, not a URL, so it is separated
// by filepath.Separator, which isn't necessarily '/'.
//
// An empty Dir is treated as ".".
//
// Open rejects names containing the native path separator (on systems
// where it is not '/') or a NUL byte, and confines all other names to
// the directory tree via path.Clean.
type Dir string
| |
| func (d Dir) Open(name string) (File, error) { |
| if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || |
| strings.Contains(name, "\x00") { |
| return nil, errors.New("http: invalid character in file path") |
| } |
| dir := string(d) |
| if dir == "" { |
| dir = "." |
| } |
| f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) |
| if err != nil { |
| return nil, err |
| } |
| return f, nil |
| } |
| |
// A FileSystem implements access to a collection of named files.
// The elements in a file path are separated by slash ('/', U+002F)
// characters, regardless of host operating system convention.
type FileSystem interface {
	// Open opens the named file for reading.
	Open(name string) (File, error)
}
| |
// A File is returned by a FileSystem's Open method and can be
// served by the FileServer implementation.
//
// The methods should behave the same as those on an *os.File.
type File interface {
	io.Closer
	io.Reader
	// Readdir reads directory entries, as os.File.Readdir does.
	Readdir(count int) ([]os.FileInfo, error)
	// Seek sets the offset for the next Read, as os.File.Seek does.
	Seek(offset int64, whence int) (int64, error)
	// Stat returns metadata about the file, as os.File.Stat does.
	Stat() (os.FileInfo, error)
}
| |
| func dirList(w ResponseWriter, f File) { |
| w.Header().Set("Content-Type", "text/html; charset=utf-8") |
| fmt.Fprintf(w, "<pre>\n") |
| for { |
| dirs, err := f.Readdir(100) |
| if err != nil || len(dirs) == 0 { |
| break |
| } |
| for _, d := range dirs { |
| name := d.Name() |
| if d.IsDir() { |
| name += "/" |
| } |
| |
| url := url.URL{Path: name} |
| fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name)) |
| } |
| } |
| fmt.Fprintf(w, "</pre>\n") |
| } |
| |
| // ServeContent replies to the request using the content in the |
| // provided ReadSeeker. The main benefit of ServeContent over io.Copy |
| // is that it handles Range requests properly, sets the MIME type, and |
| // handles If-Modified-Since requests. |
| // |
| // If the response's Content-Type header is not set, ServeContent |
| // first tries to deduce the type from name's file extension and, |
| // if that fails, falls back to reading the first block of the content |
| // and passing it to DetectContentType. |
| // The name is otherwise unused; in particular it can be empty and is |
| // never sent in the response. |
| // |
| // If modtime is not the zero time or Unix epoch, ServeContent |
| // includes it in a Last-Modified header in the response. If the |
| // request includes an If-Modified-Since header, ServeContent uses |
| // modtime to decide whether the content needs to be sent at all. |
| // |
| // The content's Seek method must work: ServeContent uses |
| // a seek to the end of the content to determine its size. |
| // |
| // If the caller has set w's ETag header, ServeContent uses it to |
| // handle requests using If-Range and If-None-Match. |
| // |
| // Note that *os.File implements the io.ReadSeeker interface. |
| func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) { |
| sizeFunc := func() (int64, error) { |
| size, err := content.Seek(0, os.SEEK_END) |
| if err != nil { |
| return 0, errSeeker |
| } |
| _, err = content.Seek(0, os.SEEK_SET) |
| if err != nil { |
| return 0, errSeeker |
| } |
| return size, nil |
| } |
| serveContent(w, req, name, modtime, sizeFunc, content) |
| } |
| |
// errSeeker is returned by ServeContent's sizeFunc when the content
// doesn't seek properly. The underlying Seeker's error text isn't
// included in the sizeFunc reply so it's not sent over HTTP to end
// users.
var errSeeker = errors.New("seeker can't seek")
| |
// if name is empty, filename is unknown. (used for mime type, before sniffing)
// if modtime.IsZero(), modtime is unknown.
// content must be seeked to the beginning of the file.
// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response.
func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) {
	// Conditional-request handling first: either check may complete the
	// request with a 304 before any content work happens.
	if checkLastModified(w, r, modtime) {
		return
	}
	rangeReq, done := checkETag(w, r, modtime)
	if done {
		return
	}

	code := StatusOK

	// If Content-Type isn't set, try the file extension, falling back
	// to sniffing the first block of the content.
	ctypes, haveType := w.Header()["Content-Type"]
	var ctype string
	if !haveType {
		ctype = mime.TypeByExtension(filepath.Ext(name))
		if ctype == "" {
			// read a chunk to decide between utf-8 text and binary
			var buf [sniffLen]byte
			n, _ := io.ReadFull(content, buf[:])
			ctype = DetectContentType(buf[:n])
			// Rewind: the sniffed bytes must still be served below.
			_, err := content.Seek(0, os.SEEK_SET)
			if err != nil {
				Error(w, "seeker can't seek", StatusInternalServerError)
				return
			}
		}
		w.Header().Set("Content-Type", ctype)
	} else if len(ctypes) > 0 {
		ctype = ctypes[0]
	}

	size, err := sizeFunc()
	if err != nil {
		Error(w, err.Error(), StatusInternalServerError)
		return
	}

	// Handle the Range header, if any. sendContent/sendSize describe
	// the bytes actually written: the whole file, a single range, or a
	// multipart/byteranges body.
	sendSize := size
	var sendContent io.Reader = content
	if size >= 0 {
		ranges, err := parseRange(rangeReq, size)
		if err != nil {
			Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
			return
		}
		if sumRangesSize(ranges) > size {
			// The total number of bytes in all the ranges exceeds the
			// size of the file itself; ignore the range request rather
			// than amplify the response.
			ranges = nil
		}
		switch {
		case len(ranges) == 1:
			// A single range is sent with Content-Range and a 206 status.
			ra := ranges[0]
			if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
				Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
				return
			}
			sendSize = ra.length
			code = StatusPartialContent
			w.Header().Set("Content-Range", ra.contentRange(size))
		case len(ranges) > 1:
			sendSize = rangesMIMESize(ranges, ctype, size)
			code = StatusPartialContent

			// Multiple ranges become a multipart/byteranges body, built
			// by a goroutine that copies each range into the multipart
			// writer through a pipe. Closing pr (deferred) makes the
			// goroutine's writes fail so it exits if the copy below
			// stops early.
			pr, pw := io.Pipe()
			mw := multipart.NewWriter(pw)
			w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
			sendContent = pr
			defer pr.Close()
			go func() {
				for _, ra := range ranges {
					part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
					if err != nil {
						pw.CloseWithError(err)
						return
					}
					if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
						pw.CloseWithError(err)
						return
					}
					if _, err := io.CopyN(part, content, ra.length); err != nil {
						pw.CloseWithError(err)
						return
					}
				}
				mw.Close()
				pw.Close()
			}()
		}

		w.Header().Set("Accept-Ranges", "bytes")
		if w.Header().Get("Content-Encoding") == "" {
			w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
		}
	}

	w.WriteHeader(code)

	if r.Method != "HEAD" {
		io.CopyN(w, sendContent, sendSize)
	}
}
| |
// unixEpochTime is the Unix epoch; a modtime equal to it is treated as
// unknown, like the zero time.
var unixEpochTime = time.Unix(0, 0)

// checkLastModified implements If-Modified-Since handling.
// modtime is the modification time of the resource to be served, or IsZero().
// return value is whether this request is now complete.
func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool {
	if modtime.IsZero() || modtime.Equal(unixEpochTime) {
		// Modification time unknown; nothing to compare against.
		return false
	}

	// The If-Modified-Since value has only second precision, so treat
	// the resource as unmodified when modtime < t+1s.
	if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) {
		h := w.Header()
		delete(h, "Content-Type")
		delete(h, "Content-Length")
		w.WriteHeader(StatusNotModified)
		return true
	}
	w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat))
	return false
}
| |
// checkETag implements If-None-Match and If-Range checks.
//
// The ETag or modtime must have been previously set in the
// ResponseWriter's headers. The modtime is only compared at second
// granularity and may be the zero value to mean unknown.
//
// The return value is the effective request "Range" header to use and
// whether this request is now considered done.
func checkETag(w ResponseWriter, r *Request, modtime time.Time) (rangeReq string, done bool) {
	etag := w.Header().get("Etag")
	rangeReq = r.Header.get("Range")

	// If-Range: the range request only applies when the validator
	// matches the current entity; otherwise drop the Range header so
	// the full entity is served.
	if ir := r.Header.get("If-Range"); ir != "" && ir != etag {
		// The If-Range value may also be the entity's modification
		// date rather than an ETag; compare at second granularity.
		timeMatches := false
		if !modtime.IsZero() {
			if t, err := ParseTime(ir); err == nil && t.Unix() == modtime.Unix() {
				timeMatches = true
			}
		}
		if !timeMatches {
			rangeReq = ""
		}
	}

	if inm := r.Header.get("If-None-Match"); inm != "" {
		// Without a known ETag there is nothing to compare against.
		if etag == "" {
			return rangeReq, false
		}

		// Only GET and HEAD get 304 handling here; other methods fall
		// through unchanged.
		if r.Method != "GET" && r.Method != "HEAD" {
			return rangeReq, false
		}

		// Only the single-value forms are handled; comma-separated
		// If-None-Match lists are not matched here.
		if inm == etag || inm == "*" {
			h := w.Header()
			delete(h, "Content-Type")
			delete(h, "Content-Length")
			w.WriteHeader(StatusNotModified)
			return "", true
		}
	}
	return rangeReq, false
}
| |
| // name is '/'-separated, not filepath.Separator. |
| func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) { |
| const indexPage = "/index.html" |
| |
| if strings.HasSuffix(r.URL.Path, indexPage) { |
| localRedirect(w, r, "./") |
| return |
| } |
| |
| f, err := fs.Open(name) |
| if err != nil { |
| msg, code := toHTTPError(err) |
| Error(w, msg, code) |
| return |
| } |
| defer f.Close() |
| |
| d, err1 := f.Stat() |
| if err1 != nil { |
| msg, code := toHTTPError(err) |
| Error(w, msg, code) |
| return |
| } |
| |
| if redirect { |
| |
| url := r.URL.Path |
| if d.IsDir() { |
| if url[len(url)-1] != '/' { |
| localRedirect(w, r, path.Base(url)+"/") |
| return |
| } |
| } else { |
| if url[len(url)-1] == '/' { |
| localRedirect(w, r, "../"+path.Base(url)) |
| return |
| } |
| } |
| } |
| |
| if d.IsDir() { |
| index := strings.TrimSuffix(name, "/") + indexPage |
| ff, err := fs.Open(index) |
| if err == nil { |
| defer ff.Close() |
| dd, err := ff.Stat() |
| if err == nil { |
| name = index |
| d = dd |
| f = ff |
| } |
| } |
| } |
| |
| if d.IsDir() { |
| if checkLastModified(w, r, d.ModTime()) { |
| return |
| } |
| dirList(w, f) |
| return |
| } |
| |
| sizeFunc := func() (int64, error) { return d.Size(), nil } |
| serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f) |
| } |
| |
| // toHTTPError returns a non-specific HTTP error message and status code |
| // for a given non-nil error value. It's important that toHTTPError does not |
| // actually return err.Error(), since msg and httpStatus are returned to users, |
| // and historically Go's ServeContent always returned just "404 Not Found" for |
| // all errors. We don't want to start leaking information in error messages. |
| func toHTTPError(err error) (msg string, httpStatus int) { |
| if os.IsNotExist(err) { |
| return "404 page not found", StatusNotFound |
| } |
| if os.IsPermission(err) { |
| return "403 Forbidden", StatusForbidden |
| } |
| |
| return "500 Internal Server Error", StatusInternalServerError |
| } |
| |
// localRedirect gives a Moved Permanently response.
// It does not convert relative paths to absolute paths like Redirect does.
// Any query string on the request is preserved in the Location target.
func localRedirect(w ResponseWriter, r *Request, newPath string) {
	if q := r.URL.RawQuery; q != "" {
		newPath += "?" + q
	}
	w.Header().Set("Location", newPath)
	w.WriteHeader(StatusMovedPermanently)
}
| |
| // ServeFile replies to the request with the contents of the named |
| // file or directory. |
| // |
| // As a special case, ServeFile redirects any request where r.URL.Path |
| // ends in "/index.html" to the same path, without the final |
| // "index.html". To avoid such redirects either modify the path or |
| // use ServeContent. |
| func ServeFile(w ResponseWriter, r *Request, name string) { |
| dir, file := filepath.Split(name) |
| serveFile(w, r, Dir(dir), file, false) |
| } |
| |
// fileHandler is the Handler implementation returned by FileServer,
// serving files from the file system rooted at root.
type fileHandler struct {
	root FileSystem
}
| |
| // FileServer returns a handler that serves HTTP requests |
| // with the contents of the file system rooted at root. |
| // |
| // To use the operating system's file system implementation, |
| // use http.Dir: |
| // |
| // http.Handle("/", http.FileServer(http.Dir("/tmp"))) |
| // |
| // As a special case, the returned file server redirects any request |
| // ending in "/index.html" to the same path, without the final |
| // "index.html". |
| func FileServer(root FileSystem) Handler { |
| return &fileHandler{root} |
| } |
| |
// ServeHTTP serves the file or directory named by the cleaned request
// path, relative to f.root.
func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
	upath := r.URL.Path
	if !strings.HasPrefix(upath, "/") {
		// Normalize so path.Clean below treats the path as rooted and
		// cannot escape above the root.
		upath = "/" + upath
		r.URL.Path = upath
	}
	serveFile(w, r, f.root, path.Clean(upath), true)
}
| |
// httpRange specifies the byte range to be sent to the client.
type httpRange struct {
	start, length int64
}

// contentRange renders the range as a Content-Range header value for
// an entity of the given total size.
func (r httpRange) contentRange(size int64) string {
	return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
}

// mimeHeader builds the per-part header used when the range is sent as
// one part of a multipart/byteranges response.
func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
	return textproto.MIMEHeader{
		"Content-Range": {r.contentRange(size)},
		"Content-Type":  {contentType},
	}
}

// parseRange parses a Range header string as per RFC 2616.
// A nil slice with a nil error means the header was absent.
func parseRange(s string, size int64) ([]httpRange, error) {
	if s == "" {
		return nil, nil // header not present
	}
	const prefix = "bytes="
	if !strings.HasPrefix(s, prefix) {
		return nil, errors.New("invalid range")
	}
	var ranges []httpRange
	for _, spec := range strings.Split(s[len(prefix):], ",") {
		spec = strings.TrimSpace(spec)
		if spec == "" {
			continue
		}
		dash := strings.Index(spec, "-")
		if dash < 0 {
			return nil, errors.New("invalid range")
		}
		startStr := strings.TrimSpace(spec[:dash])
		endStr := strings.TrimSpace(spec[dash+1:])
		var r httpRange
		if startStr == "" {
			// Suffix form "-N": the final N bytes of the entity,
			// clamped to the entity size.
			n, err := strconv.ParseInt(endStr, 10, 64)
			if err != nil {
				return nil, errors.New("invalid range")
			}
			if n > size {
				n = size
			}
			r.start = size - n
			r.length = size - r.start
		} else {
			start, err := strconv.ParseInt(startStr, 10, 64)
			if err != nil || start >= size || start < 0 {
				return nil, errors.New("invalid range")
			}
			r.start = start
			if endStr == "" {
				// Form "N-": from N through the end of the entity.
				r.length = size - r.start
			} else {
				end, err := strconv.ParseInt(endStr, 10, 64)
				if err != nil || r.start > end {
					return nil, errors.New("invalid range")
				}
				// An end past the entity is clamped to the last byte.
				if end >= size {
					end = size - 1
				}
				r.length = end - r.start + 1
			}
		}
		ranges = append(ranges, r)
	}
	return ranges, nil
}
| |
// countingWriter counts how many bytes have been written to it.
type countingWriter int64

// Write discards p, recording only its length; it never fails.
func (w *countingWriter) Write(p []byte) (int, error) {
	*w += countingWriter(len(p))
	return len(p), nil
}
| |
| // rangesMIMESize returns the number of bytes it takes to encode the |
| // provided ranges as a multipart response. |
| func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) { |
| var w countingWriter |
| mw := multipart.NewWriter(&w) |
| for _, ra := range ranges { |
| mw.CreatePart(ra.mimeHeader(contentType, contentSize)) |
| encSize += ra.length |
| } |
| mw.Close() |
| encSize += int64(w) |
| return |
| } |
| |
| func sumRangesSize(ranges []httpRange) (size int64) { |
| for _, ra := range ranges { |
| size += ra.length |
| } |
| return |
| } |
| |
// raceEnabled reports whether the build includes the race detector;
// race.go sets it to true.
var raceEnabled = false // set by race.go
| |
// A Header represents the key-value pairs in an HTTP header.
// It shares its representation with textproto.MIMEHeader, and the
// exported methods delegate to that type, canonicalizing keys.
type Header map[string][]string

// Add adds the key, value pair to the header.
// It appends to any existing values associated with key.
func (h Header) Add(key, value string) {
	textproto.MIMEHeader(h).Add(key, value)
}

// Set sets the header entries associated with key to
// the single element value. It replaces any existing
// values associated with key.
func (h Header) Set(key, value string) {
	textproto.MIMEHeader(h).Set(key, value)
}

// Get gets the first value associated with the given key.
// If there are no values associated with the key, Get returns "".
// To access multiple values of a key, access the map directly
// with CanonicalHeaderKey.
func (h Header) Get(key string) string {
	return textproto.MIMEHeader(h).Get(key)
}

// get is like Get, but key must already be in CanonicalHeaderKey form.
func (h Header) get(key string) string {
	if v, ok := h[key]; ok && len(v) > 0 {
		return v[0]
	}
	return ""
}

// Del deletes the values associated with key.
func (h Header) Del(key string) {
	textproto.MIMEHeader(h).Del(key)
}
| |
// Write writes a header in wire format (sorted "Key: value\r\n" lines).
func (h Header) Write(w io.Writer) error {
	return h.WriteSubset(w, nil)
}
| |
// clone returns a deep copy of h: the map and each value slice are
// freshly allocated, so mutating the copy never affects h.
func (h Header) clone() Header {
	h2 := make(Header, len(h))
	for k, vv := range h {
		vv2 := make([]string, len(vv))
		copy(vv2, vv)
		h2[k] = vv2
	}
	return h2
}
| |
| var timeFormats = []string{ |
| TimeFormat, |
| time.RFC850, |
| time.ANSIC, |
| } |
| |
| // ParseTime parses a time header (such as the Date: header), |
| // trying each of the three formats allowed by HTTP/1.1: |
| // TimeFormat, time.RFC850, and time.ANSIC. |
| func ParseTime(text string) (t time.Time, err error) { |
| for _, layout := range timeFormats { |
| t, err = time.Parse(layout, text) |
| if err == nil { |
| return |
| } |
| } |
| return |
| } |
| |
// headerNewlineToSpace replaces CR and LF in header values with spaces
// so that written values cannot inject extra header lines.
var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
| |
// writeStringer is the interface satisfied by writers that can consume
// a string directly, without a []byte conversion.
type writeStringer interface {
	WriteString(string) (int, error)
}
| |
// stringWriter implements WriteString on a Writer by converting the
// string to a byte slice (one allocation per call).
type stringWriter struct {
	w io.Writer
}

// WriteString writes s to the underlying writer.
func (w stringWriter) WriteString(s string) (n int, err error) {
	return w.w.Write([]byte(s))
}
| |
// keyValues is one header key with its associated values, used when
// sorting headers for deterministic wire output.
type keyValues struct {
	key    string
	values []string
}

// A headerSorter implements sort.Interface by sorting a []keyValues
// by key. It's used as a pointer, so it can fit in a sort.Interface
// interface value without allocation.
type headerSorter struct {
	kvs []keyValues
}

func (s *headerSorter) Len() int           { return len(s.kvs) }
func (s *headerSorter) Swap(i, j int)      { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] }
func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key }

// headerSorterPool recycles sorters (and their backing slices) across
// header writes.
var headerSorterPool = sync.Pool{
	New: func() interface{} { return new(headerSorter) },
}
| |
// sortedKeyValues returns h's keys sorted in the returned kvs
// slice. The headerSorter used to sort is also returned, for possible
// return to headerSorterCache.
func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) {
	// Reuse a pooled sorter; grow its scratch slice only when h has
	// more keys than the slice's current capacity.
	hs = headerSorterPool.Get().(*headerSorter)
	if cap(hs.kvs) < len(h) {
		hs.kvs = make([]keyValues, 0, len(h))
	}
	kvs = hs.kvs[:0]
	for k, vv := range h {
		if !exclude[k] {
			kvs = append(kvs, keyValues{k, vv})
		}
	}
	// Store the (possibly regrown) slice back so its capacity is kept
	// when the sorter is returned to the pool.
	hs.kvs = kvs
	sort.Sort(hs)
	return kvs, hs
}
| |
// WriteSubset writes a header in wire format.
// If exclude is not nil, keys where exclude[key] == true are not written.
func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
	ws, ok := w.(writeStringer)
	if !ok {
		// Fall back to a wrapper that routes WriteString through Write.
		ws = stringWriter{w}
	}
	kvs, sorter := h.sortedKeyValues(exclude)
	for _, kv := range kvs {
		for _, v := range kv.values {
			// Sanitize each value: newlines become spaces (preventing
			// header injection) and surrounding whitespace is dropped.
			v = headerNewlineToSpace.Replace(v)
			v = textproto.TrimString(v)
			for _, s := range []string{kv.key, ": ", v, "\r\n"} {
				if _, err := ws.WriteString(s); err != nil {
					return err
				}
			}
		}
	}
	// Only reached on success; an early error return skips the pool
	// put, which is safe — the sorter is simply not reused.
	headerSorterPool.Put(sorter)
	return nil
}
| |
// CanonicalHeaderKey returns the canonical format of the
// header key s. The canonicalization converts the first
// letter and any letter following a hyphen to upper case; the
// rest are converted to lowercase. For example, the canonical
// key for "accept-encoding" is "Accept-Encoding". If s
// contains a space or invalid header field bytes, it is
// returned without modifications.
func CanonicalHeaderKey(s string) string {
	return textproto.CanonicalMIMEHeaderKey(s)
}
| |
// hasToken reports whether token appears within v, ASCII
// case-insensitive, with space or comma boundaries.
// token must be all lowercase.
// v may contain mixed cased.
func hasToken(v, token string) bool {
	if token == "" || len(v) < len(token) {
		return false
	}
	if v == token {
		return true
	}
	for sp := 0; sp+len(token) <= len(v); sp++ {
		// Cheap ASCII case-insensitive first-byte check before the
		// full comparison below.
		if b := v[sp]; b != token[0] && b|0x20 != token[0] {
			continue
		}
		// The candidate must begin at the start of v or just after a
		// boundary byte...
		if sp > 0 && !isTokenBoundary(v[sp-1]) {
			continue
		}
		// ...and end at the end of v or just before a boundary byte.
		if end := sp + len(token); end != len(v) && !isTokenBoundary(v[end]) {
			continue
		}
		if strings.EqualFold(v[sp:sp+len(token)], token) {
			return true
		}
	}
	return false
}

// isTokenBoundary reports whether b separates tokens in a header value.
func isTokenBoundary(b byte) bool {
	return b == ' ' || b == ',' || b == '\t'
}
| |
// A CookieJar manages storage and use of cookies in HTTP requests.
//
// Implementations of CookieJar must be safe for concurrent use by multiple
// goroutines.
//
// The net/http/cookiejar package provides a CookieJar implementation.
type CookieJar interface {
	// SetCookies handles the receipt of the cookies in a reply for the
	// given URL. It may or may not choose to save the cookies, depending
	// on the jar's policy and implementation.
	SetCookies(u *url.URL, cookies []*Cookie)

	// Cookies returns the cookies to send in a request for the given URL.
	// It is up to the implementation to honor the standard cookie use
	// restrictions such as in RFC 6265.
	Cookies(u *url.URL) []*Cookie
}
| |
// isTokenTable marks, per ASCII byte, whether the byte is valid in an
// HTTP token (the characters appear to follow the RFC 2616 "token"
// charset — confirm against the spec). Index only below 127; see isToken.
var isTokenTable = [127]bool{
	'!':  true,
	'#':  true,
	'$':  true,
	'%':  true,
	'&':  true,
	'\'': true,
	'*':  true,
	'+':  true,
	'-':  true,
	'.':  true,
	'0':  true,
	'1':  true,
	'2':  true,
	'3':  true,
	'4':  true,
	'5':  true,
	'6':  true,
	'7':  true,
	'8':  true,
	'9':  true,
	'A':  true,
	'B':  true,
	'C':  true,
	'D':  true,
	'E':  true,
	'F':  true,
	'G':  true,
	'H':  true,
	'I':  true,
	'J':  true,
	'K':  true,
	'L':  true,
	'M':  true,
	'N':  true,
	'O':  true,
	'P':  true,
	'Q':  true,
	'R':  true,
	'S':  true,
	'T':  true,
	'U':  true,
	// NOTE(review): 'W' and 'V' are listed out of alphabetical order;
	// harmless, since map-style composite literal order is irrelevant.
	'W': true,
	'V': true,
	'X': true,
	'Y': true,
	'Z': true,
	'^': true,
	'_': true,
	'` + "`" + `': true,
	'a': true,
	'b': true,
	'c': true,
	'd': true,
	'e': true,
	'f': true,
	'g': true,
	'h': true,
	'i': true,
	'j': true,
	'k': true,
	'l': true,
	'm': true,
	'n': true,
	'o': true,
	'p': true,
	'q': true,
	'r': true,
	's': true,
	't': true,
	'u': true,
	'v': true,
	'w': true,
	'x': true,
	'y': true,
	'z': true,
	'|': true,
	'~': true,
}
| |
// isToken reports whether r is a valid HTTP token rune; anything at or
// above 127 is rejected.
// NOTE(review): a negative rune would index out of bounds here; callers
// appear to pass only bytes widened to rune — confirm.
func isToken(r rune) bool {
	i := int(r)
	return i < len(isTokenTable) && isTokenTable[i]
}
| |
// isNotToken reports whether r is not a valid HTTP token rune.
func isNotToken(r rune) bool {
	return !isToken(r)
}
| |
// headerValuesContainsToken reports whether any string in values
// contains the provided token, ASCII case-insensitively.
func headerValuesContainsToken(values []string, token string) bool {
	for _, v := range values {
		if headerValueContainsToken(v, token) {
			return true
		}
	}
	return false
}
| |
// isOWS reports whether b is an optional whitespace byte, as defined
// by RFC 7230 section 3.2.3.
func isOWS(b byte) bool { return b == ' ' || b == '\t' }

// trimOWS returns x with all optional whitespace removed from the
// beginning and end. Note OWS is exactly space and horizontal tab —
// this is deliberately narrower than strings.TrimSpace.
func trimOWS(x string) string {
	lo, hi := 0, len(x)
	for lo < hi && isOWS(x[lo]) {
		lo++
	}
	for hi > lo && isOWS(x[hi-1]) {
		hi--
	}
	return x[lo:hi]
}
| |
| // headerValueContainsToken reports whether v (assumed to be a |
| // 0#element, in the ABNF extension described in RFC 7230 section 7) |
| // contains token amongst its comma-separated tokens, ASCII |
| // case-insensitively. |
| func headerValueContainsToken(v string, token string) bool { |
| v = trimOWS(v) |
| if comma := strings.IndexByte(v, ','); comma != -1 { |
| return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) |
| } |
| return tokenEqual(v, token) |
| } |
| |
// lowerASCII returns the ASCII lowercase version of b.
func lowerASCII(b byte) byte {
	if b >= 'A' && b <= 'Z' {
		b += 'a' - 'A'
	}
	return b
}

// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
func tokenEqual(t1, t2 string) bool {
	if len(t1) != len(t2) {
		return false
	}
	for i, r := range t1 {
		if r >= utf8.RuneSelf {
			// No non-ASCII rune can be ASCII-case-equal to anything.
			return false
		}
		if lowerASCII(byte(r)) != lowerASCII(t2[i]) {
			return false
		}
	}
	return true
}
| |
const (
	// defaultMaxMemory bounds the in-memory portion of parsed
	// multipart forms — presumably the default for ParseMultipartForm;
	// confirm at the call sites outside this view.
	defaultMaxMemory = 32 << 20 // 32 MB
)
| |
// ErrMissingFile is returned by FormFile when the provided file field name
// is either not present in the request or not a file field.
var ErrMissingFile = errors.New("http: no such file")
| |
// ProtocolError describes an HTTP request parsing error.
type ProtocolError struct {
	ErrorString string
}

// Error implements the error interface.
func (err *ProtocolError) Error() string { return err.ErrorString }
| |
// Sentinel protocol errors returned by request/response parsing and
// writing code in this package.
var (
	ErrHeaderTooLong        = &ProtocolError{"header too long"}
	ErrShortBody            = &ProtocolError{"entity body too short"}
	ErrNotSupported         = &ProtocolError{"feature not supported"}
	ErrUnexpectedTrailer    = &ProtocolError{"trailer header without chunked transfer encoding"}
	ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
	ErrNotMultipart         = &ProtocolError{"request Content-Type isn't multipart/form-data"}
	ErrMissingBoundary      = &ProtocolError{"no multipart boundary param in Content-Type"}
)
| |
// badStringError pairs a description with the offending string,
// formatted as `what "str"`.
type badStringError struct {
	what string
	str  string
}

func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
| |
// Headers that Request.Write handles itself and should be skipped
// when the remaining headers are written via Header.WriteSubset.
var reqWriteExcludeHeader = map[string]bool{
	"Host":              true,
	"User-Agent":        true,
	"Content-Length":    true,
	"Transfer-Encoding": true,
	"Trailer":           true,
}
| |
// A Request represents an HTTP request received by a server
// or to be sent by a client.
//
// The field semantics differ slightly between client and server
// usage. In addition to the notes on the fields below, see the
// documentation for Request.Write and RoundTripper.
type Request struct {
	// Method specifies the HTTP method (GET, POST, PUT, etc.).
	// For client requests an empty string means GET.
	Method string

	// URL specifies either the URI being requested (for server
	// requests) or the URL to access (for client requests).
	//
	// For server requests the URL is parsed from the URI
	// supplied on the Request-Line as stored in RequestURI. For
	// most requests, fields other than Path and RawQuery will be
	// empty. (See RFC 2616, Section 5.1.2)
	//
	// For client requests, the URL's Host specifies the server to
	// connect to, while the Request's Host field optionally
	// specifies the Host header value to send in the HTTP
	// request.
	URL *url.URL

	// The protocol version for incoming requests.
	// Client requests always use HTTP/1.1.
	Proto      string // "HTTP/1.0"
	ProtoMajor int    // 1
	ProtoMinor int    // 0

	// A header maps request lines to their values.
	// If the header says
	//
	//	accept-encoding: gzip, deflate
	//	Accept-Language: en-us
	//	Connection: keep-alive
	//
	// then
	//
	//	Header = map[string][]string{
	//		"Accept-Encoding": {"gzip, deflate"},
	//		"Accept-Language": {"en-us"},
	//		"Connection": {"keep-alive"},
	//	}
	//
	// HTTP defines that header names are case-insensitive.
	// The request parser implements this by canonicalizing the
	// name, making the first character and any characters
	// following a hyphen uppercase and the rest lowercase.
	//
	// For client requests certain headers are automatically
	// added and may override values in Header.
	//
	// See the documentation for the Request.Write method.
	Header Header

	// Body is the request's body.
	//
	// For client requests a nil body means the request has no
	// body, such as a GET request. The HTTP Client's Transport
	// is responsible for calling the Close method.
	//
	// For server requests the Request Body is always non-nil
	// but will return EOF immediately when no body is present.
	// The Server will close the request body. The ServeHTTP
	// Handler does not need to.
	Body io.ReadCloser

	// ContentLength records the length of the associated content.
	// The value -1 indicates that the length is unknown.
	// Values >= 0 indicate that the given number of bytes may
	// be read from Body.
	// For client requests, a value of 0 means unknown if Body is not nil.
	ContentLength int64

	// TransferEncoding lists the transfer encodings from outermost to
	// innermost. An empty list denotes the "identity" encoding.
	// TransferEncoding can usually be ignored; chunked encoding is
	// automatically added and removed as necessary when sending and
	// receiving requests.
	TransferEncoding []string

	// Close indicates whether to close the connection after
	// replying to this request (for servers) or after sending
	// the request (for clients).
	Close bool

	// For server requests Host specifies the host on which the
	// URL is sought. Per RFC 2616, this is either the value of
	// the "Host" header or the host name given in the URL itself.
	// It may be of the form "host:port".
	//
	// For client requests Host optionally overrides the Host
	// header to send. If empty, the Request.Write method uses
	// the value of URL.Host.
	Host string

	// Form contains the parsed form data, including both the URL
	// field's query parameters and the POST or PUT form data.
	// This field is only available after ParseForm is called.
	// The HTTP client ignores Form and uses Body instead.
	Form url.Values

	// PostForm contains the parsed form data from POST, PATCH,
	// or PUT body parameters.
	//
	// This field is only available after ParseForm is called.
	// The HTTP client ignores PostForm and uses Body instead.
	PostForm url.Values

	// MultipartForm is the parsed multipart form, including file uploads.
	// This field is only available after ParseMultipartForm is called.
	// The HTTP client ignores MultipartForm and uses Body instead.
	MultipartForm *multipart.Form

	// Trailer specifies additional headers that are sent after the request
	// body.
	//
	// For server requests the Trailer map initially contains only the
	// trailer keys, with nil values. (The client declares which trailers it
	// will later send.) While the handler is reading from Body, it must
	// not reference Trailer. After reading from Body returns EOF, Trailer
	// can be read again and will contain non-nil values, if they were sent
	// by the client.
	//
	// For client requests Trailer must be initialized to a map containing
	// the trailer keys to later send. The values may be nil or their final
	// values. The ContentLength must be 0 or -1, to send a chunked request.
	// After the HTTP request is sent the map values can be updated while
	// the request body is read. Once the body returns EOF, the caller must
	// not mutate Trailer.
	//
	// Few HTTP clients, servers, or proxies support HTTP trailers.
	Trailer Header

	// RemoteAddr allows HTTP servers and other software to record
	// the network address that sent the request, usually for
	// logging. This field is not filled in by ReadRequest and
	// has no defined format. The HTTP server in this package
	// sets RemoteAddr to an "IP:port" address before invoking a
	// handler.
	// This field is ignored by the HTTP client.
	RemoteAddr string

	// RequestURI is the unmodified Request-URI of the
	// Request-Line (RFC 2616, Section 5.1) as sent by the client
	// to a server. Usually the URL field should be used instead.
	// It is an error to set this field in an HTTP client request.
	RequestURI string

	// TLS allows HTTP servers and other software to record
	// information about the TLS connection on which the request
	// was received. This field is not filled in by ReadRequest.
	// The HTTP server in this package sets the field for
	// TLS-enabled connections before invoking a handler;
	// otherwise it leaves the field nil.
	// This field is ignored by the HTTP client.
	TLS *tls.ConnectionState

	// Cancel is an optional channel whose closure indicates that the client
	// request should be regarded as canceled. Not all implementations of
	// RoundTripper may support Cancel.
	//
	// For server requests, this field is not applicable.
	Cancel <-chan struct{}
}
| |
| // ProtoAtLeast reports whether the HTTP protocol used |
| // in the request is at least major.minor. |
| func (r *Request) ProtoAtLeast(major, minor int) bool { |
| return r.ProtoMajor > major || |
| r.ProtoMajor == major && r.ProtoMinor >= minor |
| } |
| |
| // UserAgent returns the client's User-Agent, if sent in the request. |
| func (r *Request) UserAgent() string { |
| return r.Header.Get("User-Agent") |
| } |
| |
| // Cookies parses and returns the HTTP cookies sent with the request. |
| func (r *Request) Cookies() []*Cookie { |
| return readCookies(r.Header, "") |
| } |
| |
// ErrNoCookie is returned by Request's Cookie method when a cookie
// with the requested name is not present.
var ErrNoCookie = errors.New("http: named cookie not present")
| |
| // Cookie returns the named cookie provided in the request or |
| // ErrNoCookie if not found. |
| func (r *Request) Cookie(name string) (*Cookie, error) { |
| for _, c := range readCookies(r.Header, name) { |
| return c, nil |
| } |
| return nil, ErrNoCookie |
| } |
| |
| // AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, |
| // AddCookie does not attach more than one Cookie header field. That |
| // means all cookies, if any, are written into the same line, |
| // separated by semicolon. |
| func (r *Request) AddCookie(c *Cookie) { |
| s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) |
| if c := r.Header.Get("Cookie"); c != "" { |
| r.Header.Set("Cookie", c+"; "+s) |
| } else { |
| r.Header.Set("Cookie", s) |
| } |
| } |
| |
| // Referer returns the referring URL, if sent in the request. |
| // |
| // Referer is misspelled as in the request itself, a mistake from the |
| // earliest days of HTTP. This value can also be fetched from the |
| // Header map as Header["Referer"]; the benefit of making it available |
| // as a method is that the compiler can diagnose programs that use the |
| // alternate (correct English) spelling req.Referrer() but cannot |
| // diagnose programs that use Header["Referrer"]. |
| func (r *Request) Referer() string { |
| return r.Header.Get("Referer") |
| } |
| |
// multipartByReader is a sentinel value.
// Its presence in Request.MultipartForm indicates that parsing of the request
// body has been handed off to a MultipartReader instead of ParseMultipartForm.
var multipartByReader = &multipart.Form{
	Value: make(map[string][]string),
	File:  make(map[string][]*multipart.FileHeader),
}
| |
| // MultipartReader returns a MIME multipart reader if this is a |
| // multipart/form-data POST request, else returns nil and an error. |
| // Use this function instead of ParseMultipartForm to |
| // process the request body as a stream. |
| func (r *Request) MultipartReader() (*multipart.Reader, error) { |
| if r.MultipartForm == multipartByReader { |
| return nil, errors.New("http: MultipartReader called twice") |
| } |
| if r.MultipartForm != nil { |
| return nil, errors.New("http: multipart handled by ParseMultipartForm") |
| } |
| r.MultipartForm = multipartByReader |
| return r.multipartReader() |
| } |
| |
| func (r *Request) multipartReader() (*multipart.Reader, error) { |
| v := r.Header.Get("Content-Type") |
| if v == "" { |
| return nil, ErrNotMultipart |
| } |
| d, params, err := mime.ParseMediaType(v) |
| if err != nil || d != "multipart/form-data" { |
| return nil, ErrNotMultipart |
| } |
| boundary, ok := params["boundary"] |
| if !ok { |
| return nil, ErrMissingBoundary |
| } |
| return multipart.NewReader(r.Body, boundary), nil |
| } |
| |
// valueOrDefault returns value when it is non-empty, and def otherwise.
func valueOrDefault(value, def string) string {
	if value == "" {
		return def
	}
	return value
}
| |
// defaultUserAgent is the User-Agent header value sent when the caller
// has not supplied one (see Request.write).
//
// NOTE: This is not intended to reflect the actual Go version being used.
// It was changed at the time of Go 1.1 release because the former User-Agent
// had ended up on a blacklist for some intrusion detection systems.
// See https://codereview.appspot.com/7532043.
const defaultUserAgent = "Go-http-client/1.1"
| |
// Write writes an HTTP/1.1 request, which is the header and body, in wire format.
// This method consults the following fields of the request:
//
//	Host
//	URL
//	Method (defaults to "GET")
//	Header
//	ContentLength
//	TransferEncoding
//	Body
//
// If Body is present, Content-Length is <= 0 and TransferEncoding
// hasn't been set to "identity", Write adds "Transfer-Encoding:
// chunked" to the header. Body is closed after it is sent.
func (r *Request) Write(w io.Writer) error {
	// usingProxy=false: the Request-URI is written in origin form.
	return r.write(w, false, nil)
}
| |
// WriteProxy is like Write but writes the request in the form
// expected by an HTTP proxy. In particular, WriteProxy writes the
// initial Request-URI line of the request with an absolute URI, per
// section 5.1.2 of RFC 2616, including the scheme and host.
// In either case, WriteProxy also writes a Host header, using
// either r.Host or r.URL.Host.
func (r *Request) WriteProxy(w io.Writer) error {
	// usingProxy=true: the Request-URI is written in absolute form.
	return r.write(w, true, nil)
}
| |
// write serializes the request to w in HTTP/1.1 wire format. When
// usingProxy is true, the Request-URI is written in absolute form
// (scheme://host/path). extraHeaders may be nil; when non-nil, they are
// written after the regular headers.
func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {

	// Find the target host: prefer the Host field, falling back to
	// the host in the URL.
	host := cleanHost(req.Host)
	if host == "" {
		if req.URL == nil {
			return errors.New("http: Request.Write on Request with no Host or URL set")
		}
		host = cleanHost(req.URL.Host)
	}

	// Per RFC 6874, an IPv6 zone identifier must not appear in an
	// outgoing URI or Host header.
	host = removeZone(host)

	ruri := req.URL.RequestURI()
	if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" {
		ruri = req.URL.Scheme + "://" + host + ruri
	} else if req.Method == "CONNECT" && req.URL.Path == "" {
		// CONNECT requests normally give just the host and port,
		// not a full URL.
		ruri = host
	}

	// Wrap the writer in a bufio Writer if it's not already buffered.
	// Don't always call NewWriter, as that forces a bytes.Buffer
	// and other small bufio Writers to have a minimum 4k buffer
	// size.
	var bw *bufio.Writer
	if _, ok := w.(io.ByteWriter); !ok {
		bw = bufio.NewWriter(w)
		w = bw
	}

	_, err := fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri)
	if err != nil {
		return err
	}

	// Header lines.
	_, err = fmt.Fprintf(w, "Host: %s\r\n", host)
	if err != nil {
		return err
	}

	// Use the defaultUserAgent unless the Header contains one, which
	// may be blank to suppress the header entirely.
	userAgent := defaultUserAgent
	if req.Header != nil {
		if ua := req.Header["User-Agent"]; len(ua) > 0 {
			userAgent = ua[0]
		}
	}
	if userAgent != "" {
		_, err = fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent)
		if err != nil {
			return err
		}
	}

	// Hand Body/ContentLength/TransferEncoding handling to the
	// transfer writer (defined elsewhere in this package).
	tw, err := newTransferWriter(req)
	if err != nil {
		return err
	}
	err = tw.WriteHeader(w)
	if err != nil {
		return err
	}

	err = req.Header.WriteSubset(w, reqWriteExcludeHeader)
	if err != nil {
		return err
	}

	if extraHeaders != nil {
		err = extraHeaders.Write(w)
		if err != nil {
			return err
		}
	}

	// Blank line terminates the header section.
	_, err = io.WriteString(w, "\r\n")
	if err != nil {
		return err
	}

	err = tw.WriteBody(w)
	if err != nil {
		return err
	}

	if bw != nil {
		return bw.Flush()
	}
	return nil
}
| |
// cleanHost strips anything after the first '/' or ' '.
// Ideally we'd clean the Host header according to the spec:
//
//	https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ]")
//	https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
//	https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
//
// But practically, what we are trying to avoid is the situation in
// issue 11206, where a malformed Host header used in the proxy context
// would create a bad request. So it is enough to just truncate at the
// first offending character.
func cleanHost(in string) string {
	i := strings.IndexAny(in, " /")
	if i == -1 {
		return in
	}
	return in[:i]
}
| |
// removeZone removes an IPv6 zone identifier from host, e.g. it turns
// "[fe80::1%en0]:8080" into "[fe80::1]:8080". Hosts without a
// bracketed IPv6 literal, a closing bracket, or a zone are returned
// unchanged.
func removeZone(host string) string {
	if !strings.HasPrefix(host, "[") {
		return host
	}
	end := strings.LastIndex(host, "]")
	if end < 0 {
		return host
	}
	pct := strings.LastIndex(host[:end], "%")
	if pct < 0 {
		return host
	}
	return host[:pct] + host[end:]
}
| |
// ParseHTTPVersion parses a HTTP version string.
// "HTTP/1.0" returns (1, 0, true).
func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
	const Big = 1000000 // arbitrary upper bound
	// Fast path for the two overwhelmingly common versions.
	switch vers {
	case "HTTP/1.1":
		return 1, 1, true
	case "HTTP/1.0":
		return 1, 0, true
	}
	const prefix = "HTTP/"
	if !strings.HasPrefix(vers, prefix) {
		return 0, 0, false
	}
	rest := vers[len(prefix):]
	dot := strings.Index(rest, ".")
	if dot < 0 {
		return 0, 0, false
	}
	var err error
	if major, err = strconv.Atoi(rest[:dot]); err != nil || major < 0 || major > Big {
		return 0, 0, false
	}
	if minor, err = strconv.Atoi(rest[dot+1:]); err != nil || minor < 0 || minor > Big {
		return 0, 0, false
	}
	return major, minor, true
}
| |
| // NewRequest returns a new Request given a method, URL, and optional body. |
| // |
| // If the provided body is also an io.Closer, the returned |
| // Request.Body is set to body and will be closed by the Client |
| // methods Do, Post, and PostForm, and Transport.RoundTrip. |
| // |
| // NewRequest returns a Request suitable for use with Client.Do or |
| // Transport.RoundTrip. |
| // To create a request for use with testing a Server Handler use either |
| // ReadRequest or manually update the Request fields. See the Request |
| // type's documentation for the difference between inbound and outbound |
| // request fields. |
| func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { |
| u, err := url.Parse(urlStr) |
| if err != nil { |
| return nil, err |
| } |
| rc, ok := body.(io.ReadCloser) |
| if !ok && body != nil { |
| rc = io.NopCloser(body) |
| } |
| req := &Request{ |
| Method: method, |
| URL: u, |
| Proto: "HTTP/1.1", |
| ProtoMajor: 1, |
| ProtoMinor: 1, |
| Header: make(Header), |
| Body: rc, |
| Host: u.Host, |
| } |
| if body != nil { |
| switch v := body.(type) { |
| case *bytes.Buffer: |
| req.ContentLength = int64(v.Len()) |
| case *bytes.Reader: |
| req.ContentLength = int64(v.Len()) |
| case *strings.Reader: |
| req.ContentLength = int64(v.Len()) |
| } |
| } |
| |
| return req, nil |
| } |
| |
| // BasicAuth returns the username and password provided in the request's |
| // Authorization header, if the request uses HTTP Basic Authentication. |
| // See RFC 2617, Section 2. |
| func (r *Request) BasicAuth() (username, password string, ok bool) { |
| auth := r.Header.Get("Authorization") |
| if auth == "" { |
| return |
| } |
| return parseBasicAuth(auth) |
| } |
| |
// parseBasicAuth parses an HTTP Basic Authentication string.
// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
func parseBasicAuth(auth string) (username, password string, ok bool) {
	const prefix = "Basic "
	if !strings.HasPrefix(auth, prefix) {
		return "", "", false
	}
	decoded, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
	if err != nil {
		return "", "", false
	}
	creds := string(decoded)
	// The credentials are "username:password"; the username may not
	// contain a colon, so split on the first one.
	colon := strings.IndexByte(creds, ':')
	if colon < 0 {
		return "", "", false
	}
	return creds[:colon], creds[colon+1:], true
}
| |
| // SetBasicAuth sets the request's Authorization header to use HTTP |
| // Basic Authentication with the provided username and password. |
| // |
| // With HTTP Basic Authentication the provided username and password |
| // are not encrypted. |
| func (r *Request) SetBasicAuth(username, password string) { |
| r.Header.Set("Authorization", "Basic "+basicAuth(username, password)) |
| } |
| |
// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts.
// ok is false when the line does not contain at least two spaces.
func parseRequestLine(line string) (method, requestURI, proto string, ok bool) {
	first := strings.Index(line, " ")
	if first < 0 {
		return
	}
	second := strings.Index(line[first+1:], " ")
	if second < 0 {
		return
	}
	second += first + 1
	return line[:first], line[first+1 : second], line[second+1:], true
}
| |
// textprotoReaderPool recycles *textproto.Reader values; see
// newTextprotoReader and putTextprotoReader below.
var textprotoReaderPool sync.Pool
| |
| func newTextprotoReader(br *bufio.Reader) *textproto.Reader { |
| if v := textprotoReaderPool.Get(); v != nil { |
| tr := v.(*textproto.Reader) |
| tr.R = br |
| return tr |
| } |
| return textproto.NewReader(br) |
| } |
| |
// putTextprotoReader returns r to the pool, clearing its reference to
// the underlying bufio.Reader first so the pool does not retain it.
func putTextprotoReader(r *textproto.Reader) {
	r.R = nil
	textprotoReaderPool.Put(r)
}
| |
// ReadRequest reads and parses an incoming request from b.
func ReadRequest(b *bufio.Reader) (req *Request, err error) {

	tp := newTextprotoReader(b)
	req = new(Request)

	// First line: GET /index.html HTTP/1.0
	var s string
	if s, err = tp.ReadLine(); err != nil {
		return nil, err
	}
	// The deferred cleanup is registered only after the first line has
	// been read, so an io.EOF before any bytes arrive is returned
	// unchanged, while an EOF mid-request becomes ErrUnexpectedEOF.
	defer func() {
		putTextprotoReader(tp)
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	var ok bool
	req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s)
	if !ok {
		return nil, &badStringError{"malformed HTTP request", s}
	}
	rawurl := req.RequestURI
	if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
		return nil, &badStringError{"malformed HTTP version", req.Proto}
	}

	// A CONNECT target that does not start with "/" is just an
	// authority ("host:port"); give it a temporary scheme so
	// url.ParseRequestURI will accept it.
	justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/")
	if justAuthority {
		rawurl = "http://" + rawurl
	}

	if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
		return nil, err
	}

	if justAuthority {
		// Strip the bogus "http://" scheme added above.
		req.URL.Scheme = ""
	}

	// Subsequent lines: Key: value.
	mimeHeader, err := tp.ReadMIMEHeader()
	if err != nil {
		return nil, err
	}
	req.Header = Header(mimeHeader)

	// Prefer the host from an absolute Request-URI over the Host
	// header; the header is only consulted when the URL has no host.
	req.Host = req.URL.Host
	if req.Host == "" {
		req.Host = req.Header.get("Host")
	}
	delete(req.Header, "Host")

	fixPragmaCacheControl(req.Header)

	req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)

	// readTransfer (defined elsewhere in this package) consumes the
	// body-related headers and sets up req.Body.
	err = readTransfer(req, b)
	if err != nil {
		return nil, err
	}

	return req, nil
}
| |
// MaxBytesReader is similar to io.LimitReader but is intended for
// limiting the size of incoming request bodies. In contrast to
// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
// non-EOF error for a Read beyond the limit, and closes the
// underlying reader when its Close method is called.
//
// MaxBytesReader prevents clients from accidentally or maliciously
// sending a large request and wasting server resources.
func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
	// w is retained so the reader can signal "request too large" back
	// to the server; see (*maxBytesReader).tooLarge.
	return &maxBytesReader{w: w, r: r, n: n}
}
| |
// maxBytesReader is the ReadCloser returned by MaxBytesReader.
type maxBytesReader struct {
	w       ResponseWriter
	r       io.ReadCloser // underlying reader
	n       int64         // max bytes remaining
	stopped bool          // whether the over-limit condition was already signaled to w
	sawEOF  bool          // whether r has returned io.EOF
}
| |
| func (l *maxBytesReader) tooLarge() (n int, err error) { |
| if !l.stopped { |
| l.stopped = true |
| if res, ok := l.w.(*response); ok { |
| res.requestTooLarge() |
| } |
| } |
| return 0, errors.New("http: request body too large") |
| } |
| |
// Read reads from the underlying body, enforcing the byte limit. A
// read that would exceed the limit returns the tooLarge error instead
// of io.EOF.
func (l *maxBytesReader) Read(p []byte) (n int, err error) {
	toRead := l.n
	if l.n == 0 {
		if l.sawEOF {
			return l.tooLarge()
		}
		// Budget is exhausted but EOF has not been observed yet.
		// Read one extra byte to find out whether the body really
		// ends here or the client went over the limit.
		toRead = 1
	}
	if int64(len(p)) > toRead {
		p = p[:toRead]
	}
	n, err = l.r.Read(p)
	if err == io.EOF {
		l.sawEOF = true
	}
	if l.n == 0 {
		// Zero budget remaining: any byte received here means the
		// body exceeded the limit.
		if n > 0 {
			return l.tooLarge()
		}
		return 0, err
	}
	l.n -= int64(n)
	if l.n < 0 {
		l.n = 0
	}
	return
}
| |
// Close closes the underlying reader.
func (l *maxBytesReader) Close() error {
	return l.r.Close()
}
| |
| func copyValues(dst, src url.Values) { |
| for k, vs := range src { |
| for _, value := range vs { |
| dst.Add(k, value) |
| } |
| } |
| } |
| |
// parsePostForm decodes the request body as form data based on its
// Content-Type. Only application/x-www-form-urlencoded bodies are
// decoded here; multipart/form-data is left to ParseMultipartForm.
func parsePostForm(r *Request) (vs url.Values, err error) {
	if r.Body == nil {
		err = errors.New("missing form body")
		return
	}
	ct := r.Header.Get("Content-Type")
	// RFC 2616, section 7.2.1: an empty type SHOULD be treated as
	// application/octet-stream.
	if ct == "" {
		ct = "application/octet-stream"
	}
	ct, _, err = mime.ParseMediaType(ct)
	switch {
	case ct == "application/x-www-form-urlencoded":
		var reader io.Reader = r.Body
		maxFormSize := int64(1<<63 - 1)
		if _, ok := r.Body.(*maxBytesReader); !ok {
			// The body was not already limited by MaxBytesReader,
			// so cap it at 10MB; read one extra byte so overflow
			// is detectable below.
			maxFormSize = int64(10 << 20)
			reader = io.LimitReader(r.Body, maxFormSize+1)
		}
		b, e := io.ReadAll(reader)
		if e != nil {
			// Preserve any earlier error (from ParseMediaType).
			if err == nil {
				err = e
			}
			break
		}
		if int64(len(b)) > maxFormSize {
			err = errors.New("http: POST too large")
			return
		}
		vs, e = url.ParseQuery(string(b))
		// Again, the earlier error (if any) takes precedence.
		if err == nil {
			err = e
		}
	case ct == "multipart/form-data":
		// Handled by ParseMultipartForm, which is expected to be
		// the caller in this case; nothing to do here.
	}
	return
}
| |
// ParseForm parses the raw query from the URL and updates r.Form.
//
// For POST or PUT requests, it also parses the request body as a form and
// put the results into both r.PostForm and r.Form.
// POST and PUT body parameters take precedence over URL query string values
// in r.Form.
//
// If the request Body's size has not already been limited by MaxBytesReader,
// the size is capped at 10MB.
//
// ParseMultipartForm calls ParseForm automatically.
// It is idempotent.
func (r *Request) ParseForm() error {
	var err error
	if r.PostForm == nil {
		if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" {
			r.PostForm, err = parsePostForm(r)
		}
		// Guarantee a non-nil PostForm even on error or non-body methods.
		if r.PostForm == nil {
			r.PostForm = make(url.Values)
		}
	}
	if r.Form == nil {
		// Body parameters are copied into Form first so they precede
		// (and thus take precedence over) URL query values.
		if len(r.PostForm) > 0 {
			r.Form = make(url.Values)
			copyValues(r.Form, r.PostForm)
		}
		var newValues url.Values
		if r.URL != nil {
			var e error
			newValues, e = url.ParseQuery(r.URL.RawQuery)
			// Keep the body-parse error, if any, over the query error.
			if err == nil {
				err = e
			}
		}
		if newValues == nil {
			newValues = make(url.Values)
		}
		if r.Form == nil {
			r.Form = newValues
		} else {
			copyValues(r.Form, newValues)
		}
	}
	return err
}
| |
// ParseMultipartForm parses a request body as multipart/form-data.
// The whole request body is parsed and up to a total of maxMemory bytes of
// its file parts are stored in memory, with the remainder stored on
// disk in temporary files.
// ParseMultipartForm calls ParseForm if necessary.
// After one call to ParseMultipartForm, subsequent calls have no effect.
func (r *Request) ParseMultipartForm(maxMemory int64) error {
	if r.MultipartForm == multipartByReader {
		// The body was already claimed by MultipartReader.
		return errors.New("http: multipart handled by MultipartReader")
	}
	if r.Form == nil {
		err := r.ParseForm()
		if err != nil {
			return err
		}
	}
	if r.MultipartForm != nil {
		// Already parsed; subsequent calls are no-ops.
		return nil
	}

	mr, err := r.multipartReader()
	if err != nil {
		return err
	}

	f, err := mr.ReadForm(maxMemory)
	if err != nil {
		return err
	}
	// Merge the multipart values into r.Form so FormValue sees them.
	for k, v := range f.Value {
		r.Form[k] = append(r.Form[k], v...)
	}
	r.MultipartForm = f

	return nil
}
| |
| // FormValue returns the first value for the named component of the query. |
| // POST and PUT body parameters take precedence over URL query string values. |
| // FormValue calls ParseMultipartForm and ParseForm if necessary and ignores |
| // any errors returned by these functions. |
| // If key is not present, FormValue returns the empty string. |
| // To access multiple values of the same key, call ParseForm and |
| // then inspect Request.Form directly. |
| func (r *Request) FormValue(key string) string { |
| if r.Form == nil { |
| r.ParseMultipartForm(defaultMaxMemory) |
| } |
| if vs := r.Form[key]; len(vs) > 0 { |
| return vs[0] |
| } |
| return "" |
| } |
| |
| // PostFormValue returns the first value for the named component of the POST |
| // or PUT request body. URL query parameters are ignored. |
| // PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores |
| // any errors returned by these functions. |
| // If key is not present, PostFormValue returns the empty string. |
| func (r *Request) PostFormValue(key string) string { |
| if r.PostForm == nil { |
| r.ParseMultipartForm(defaultMaxMemory) |
| } |
| if vs := r.PostForm[key]; len(vs) > 0 { |
| return vs[0] |
| } |
| return "" |
| } |
| |
| // FormFile returns the first file for the provided form key. |
| // FormFile calls ParseMultipartForm and ParseForm if necessary. |
| func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) { |
| if r.MultipartForm == multipartByReader { |
| return nil, nil, errors.New("http: multipart handled by MultipartReader") |
| } |
| if r.MultipartForm == nil { |
| err := r.ParseMultipartForm(defaultMaxMemory) |
| if err != nil { |
| return nil, nil, err |
| } |
| } |
| if r.MultipartForm != nil && r.MultipartForm.File != nil { |
| if fhs := r.MultipartForm.File[key]; len(fhs) > 0 { |
| f, err := fhs[0].Open() |
| return f, fhs[0], err |
| } |
| } |
| return nil, nil, ErrMissingFile |
| } |
| |
| func (r *Request) expectsContinue() bool { |
| return hasToken(r.Header.get("Expect"), "100-continue") |
| } |
| |
| func (r *Request) wantsHttp10KeepAlive() bool { |
| if r.ProtoMajor != 1 || r.ProtoMinor != 0 { |
| return false |
| } |
| return hasToken(r.Header.get("Connection"), "keep-alive") |
| } |
| |
| func (r *Request) wantsClose() bool { |
| return hasToken(r.Header.get("Connection"), "close") |
| } |
| |
| func (r *Request) closeBody() { |
| if r.Body != nil { |
| r.Body.Close() |
| } |
| } |
| |
// respExcludeHeader lists headers that are handled separately when
// serializing a Response (see Response.Write) and must therefore be
// skipped when the Header map itself is written.
var respExcludeHeader = map[string]bool{
	"Content-Length":    true,
	"Transfer-Encoding": true,
	"Trailer":           true,
}
| |
// Response represents the response from an HTTP request.
type Response struct {
	Status     string // e.g. "200 OK"
	StatusCode int    // e.g. 200
	Proto      string // e.g. "HTTP/1.0"
	ProtoMajor int    // e.g. 1
	ProtoMinor int    // e.g. 0

	// Header maps header keys to values. If the response had multiple
	// headers with the same key, they may be concatenated, with comma
	// delimiters. (Section 4.2 of RFC 2616 requires that multiple headers
	// be semantically equivalent to a comma-delimited sequence.) Values
	// duplicated by other fields in this struct (e.g., ContentLength) are
	// omitted from Header.
	//
	// Keys in the map are canonicalized (see CanonicalHeaderKey).
	Header Header

	// Body represents the response body.
	//
	// The http Client and Transport guarantee that Body is always
	// non-nil, even on responses without a body or responses with
	// a zero-length body. It is the caller's responsibility to
	// close Body. The default HTTP client's Transport does not
	// attempt to reuse HTTP/1.0 or HTTP/1.1 TCP connections
	// ("keep-alive") unless the Body is read to completion and is
	// closed.
	//
	// The Body is automatically dechunked if the server replied
	// with a "chunked" Transfer-Encoding.
	Body io.ReadCloser

	// ContentLength records the length of the associated content. The
	// value -1 indicates that the length is unknown. Unless Request.Method
	// is "HEAD", values >= 0 indicate that the given number of bytes may
	// be read from Body.
	ContentLength int64

	// TransferEncoding contains the transfer encodings from outer-most
	// to inner-most. A nil value means that "identity" encoding is used.
	TransferEncoding []string

	// Close records whether the header directed that the connection be
	// closed after reading Body. The value is advice for clients: neither
	// ReadResponse nor Response.Write ever closes a connection.
	Close bool

	// Trailer maps trailer keys to values, in the same
	// format as the header.
	Trailer Header

	// Request is the request that was sent to obtain this Response.
	// Request's Body is nil (having already been consumed).
	// This is only populated for Client requests.
	Request *Request

	// TLS contains information about the TLS connection on which the
	// response was received. It is nil for unencrypted responses.
	// The pointer is shared between responses and should not be
	// modified.
	TLS *tls.ConnectionState
}
| |
| // Cookies parses and returns the cookies set in the Set-Cookie headers. |
| func (r *Response) Cookies() []*Cookie { |
| return readSetCookies(r.Header) |
| } |
| |
// ErrNoLocation is returned by Response's Location method
// when no Location header is present.
var ErrNoLocation = errors.New("http: no Location header in response")
| |
| // Location returns the URL of the response's "Location" header, |
| // if present. Relative redirects are resolved relative to |
| // the Response's Request. ErrNoLocation is returned if no |
| // Location header is present. |
| func (r *Response) Location() (*url.URL, error) { |
| lv := r.Header.Get("Location") |
| if lv == "" { |
| return nil, ErrNoLocation |
| } |
| if r.Request != nil && r.Request.URL != nil { |
| return r.Request.URL.Parse(lv) |
| } |
| return url.Parse(lv) |
| } |
| |
// ReadResponse reads and returns an HTTP response from r.
// The req parameter optionally specifies the Request that corresponds
// to this Response. If nil, a GET request is assumed.
// Clients must call resp.Body.Close when finished reading resp.Body.
// After that call, clients can inspect resp.Trailer to find key/value
// pairs included in the response trailer.
func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) {
	tp := textproto.NewReader(r)
	resp := &Response{
		Request: req,
	}

	// Parse the status line: "proto code reason". An EOF here (or
	// while reading headers) means the peer cut the response short.
	line, err := tp.ReadLine()
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	// The reason phrase may be absent, so require only two fields.
	f := strings.SplitN(line, " ", 3)
	if len(f) < 2 {
		return nil, &badStringError{"malformed HTTP response", line}
	}
	reasonPhrase := ""
	if len(f) > 2 {
		reasonPhrase = f[2]
	}
	resp.Status = f[1] + " " + reasonPhrase
	resp.StatusCode, err = strconv.Atoi(f[1])
	if err != nil {
		return nil, &badStringError{"malformed HTTP status code", f[1]}
	}

	resp.Proto = f[0]
	var ok bool
	if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
		return nil, &badStringError{"malformed HTTP version", resp.Proto}
	}

	// Parse the response headers.
	mimeHeader, err := tp.ReadMIMEHeader()
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	resp.Header = Header(mimeHeader)

	fixPragmaCacheControl(resp.Header)

	// readTransfer (defined elsewhere in this package) consumes the
	// body-related headers and sets up resp.Body.
	err = readTransfer(resp, r)
	if err != nil {
		return nil, err
	}

	return resp, nil
}
| |
| // RFC2616: Should treat |
| // Pragma: no-cache |
| // like |
| // Cache-Control: no-cache |
| func fixPragmaCacheControl(header Header) { |
| if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { |
| if _, presentcc := header["Cache-Control"]; !presentcc { |
| header["Cache-Control"] = []string{"no-cache"} |
| } |
| } |
| } |
| |
| // ProtoAtLeast reports whether the HTTP protocol used |
| // in the response is at least major.minor. |
| func (r *Response) ProtoAtLeast(major, minor int) bool { |
| return r.ProtoMajor > major || |
| r.ProtoMajor == major && r.ProtoMinor >= minor |
| } |
| |
// Write writes r to w in the HTTP/1.n server response format,
// including the status line, headers, body, and optional trailer.
//
// This method consults the following fields of the response r:
//
//	StatusCode
//	ProtoMajor
//	ProtoMinor
//	Request.Method
//	TransferEncoding
//	Trailer
//	Body
//	ContentLength
//	Header, values for non-canonical keys will have unpredictable behavior
//
// The Response Body is closed after it is sent.
func (r *Response) Write(w io.Writer) error {
	// Status line. Fall back to the standard text (or a generated
	// placeholder) when Status is unset.
	text := r.Status
	if text == "" {
		var ok bool
		text, ok = statusText[r.StatusCode]
		if !ok {
			text = "status code " + strconv.Itoa(r.StatusCode)
		}
	}
	protoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor)
	statusCode := strconv.Itoa(r.StatusCode) + " "
	// Avoid writing the status code twice when Status already begins
	// with it (e.g. "200 OK").
	text = strings.TrimPrefix(text, statusCode)
	if _, err := io.WriteString(w, "HTTP/"+protoMajor+"."+protoMinor+" "+statusCode+text+"\r\n"); err != nil {
		return err
	}

	// Work on a shallow copy so the caller's Response is not mutated.
	r1 := new(Response)
	*r1 = *r
	if r1.ContentLength == 0 && r1.Body != nil {
		// Is it actually 0 length? Or just unknown?
		var buf [1]byte
		n, err := r1.Body.Read(buf[:])
		if err != nil && err != io.EOF {
			return err
		}
		if n == 0 {
			// Body is truly empty: substitute a known zero reader in
			// case the underlying one dislikes being read repeatedly.
			r1.Body = eofReader
		} else {
			// One byte was consumed by the probe; stitch it back in
			// front of the remaining body and mark length unknown.
			r1.ContentLength = -1
			r1.Body = struct {
				io.Reader
				io.Closer
			}{
				io.MultiReader(bytes.NewReader(buf[:1]), r.Body),
				r.Body,
			}
		}
	}
	// If we're sending a non-chunked HTTP/1.1 response without a
	// content-length, the only way to signal the end of the body is to
	// close the connection (the HTTP/1.0 way), so set Close.
	if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) {
		r1.Close = true
	}

	// Hand Body/ContentLength/Close/Trailer to the transfer writer.
	tw, err := newTransferWriter(r1)
	if err != nil {
		return err
	}
	err = tw.WriteHeader(w)
	if err != nil {
		return err
	}

	// Rest of the header, excluding fields the transfer writer owns.
	err = r.Header.WriteSubset(w, respExcludeHeader)
	if err != nil {
		return err
	}

	// The transfer writer may already have emitted a Content-Length;
	// only write an explicit zero when it has not.
	contentLengthAlreadySent := tw.shouldSendContentLength()
	if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent {
		if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil {
			return err
		}
	}

	// Blank line terminates the header section.
	if _, err := io.WriteString(w, "\r\n"); err != nil {
		return err
	}

	// Body (and trailer, via the transfer writer).
	err = tw.WriteBody(w)
	if err != nil {
		return err
	}

	return nil
}
| |
// Errors introduced by the HTTP server.
var (
	// ErrWriteAfterFlush: a write occurred after the response was flushed.
	ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
	// ErrBodyNotAllowed: the request method or response status code
	// does not permit a body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
	// ErrHijacked: the underlying connection has been taken over, so
	// normal writes through the server are no longer valid.
	ErrHijacked = errors.New("Conn has been hijacked")
	// ErrContentLength: more bytes were written than the declared
	// Content-Length allowed.
	ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length")
)
| |
// Objects implementing the Handler interface can be
// registered to serve a particular path or subtree
// in the HTTP server.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished
// and that the HTTP server can move on to the next request on
// the connection.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and hangs up the connection.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
| |
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. Changing the header after a call to
	// WriteHeader (or Write) has no effect unless the modified
	// headers were declared as trailers by setting the
	// "Trailer" header before the call to WriteHeader (see example).
	// To suppress implicit response headers, set their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	// If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
	// before writing the data. If the Header does not contain a
	// Content-Type line, Write adds a Content-Type set to the result of passing
	// the initial 512 bytes of written data to DetectContentType.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with status code.
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes. Only the first call has any effect; later
	// calls are logged and ignored (see response.WriteHeader).
	WriteHeader(int)
}
| |
// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}
| |
// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack(), the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	//
	// Hijack fails with ErrHijacked if called twice, and is
	// incompatible with a prior use of CloseNotifier (see conn.hijack).
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}
| |
// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives a single value
	// when the client connection has gone away.
	CloseNotify() <-chan bool
}
| |
// A conn represents the server side of an HTTP connection.
//
// The read path is layered: buf.Reader -> lr -> sr -> rwc, so that
// header reads can be size-limited (lr) and the underlying reader can
// be swapped at runtime (sr, used by closeNotify).
type conn struct {
	remoteAddr string               // network address of remote side
	server     *Server              // the Server on which the connection arrived
	rwc        net.Conn             // i/o connection
	w          io.Writer            // checkConnErrorWriter's copy of wrc, not zeroed on Hijack
	werr       error                // any errors writing to w
	sr         liveSwitchReader     // where the LimitReader reads from; usually the rwc
	lr         *io.LimitedReader    // io.LimitReader(sr)
	buf        *bufio.ReadWriter    // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc
	tlsState   *tls.ConnectionState // or nil when not using TLS
	lastMethod string               // method of previous request, or ""

	mu           sync.Mutex // guards the following
	clientGone   bool       // if client has disconnected mid-request
	closeNotifyc chan bool  // made lazily
	hijackedv    bool       // connection has been hijacked by handler
}
| |
| func (c *conn) hijacked() bool { |
| c.mu.Lock() |
| defer c.mu.Unlock() |
| return c.hijackedv |
| } |
| |
| func (c *conn) hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { |
| c.mu.Lock() |
| defer c.mu.Unlock() |
| if c.hijackedv { |
| return nil, nil, ErrHijacked |
| } |
| if c.closeNotifyc != nil { |
| return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier") |
| } |
| c.hijackedv = true |
| rwc = c.rwc |
| buf = c.buf |
| c.rwc = nil |
| c.buf = nil |
| c.setState(rwc, StateHijacked) |
| return |
| } |
| |
// closeNotify lazily creates the channel returned by CloseNotify.
// On first use it splices an io.Pipe into the connection's read path
// (via the liveSwitchReader) so that a background goroutine observes
// when the underlying reader is exhausted, i.e. the client went away.
func (c *conn) closeNotify() <-chan bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closeNotifyc == nil {
		c.closeNotifyc = make(chan bool, 1)
		if c.hijackedv {
			// The connection is no longer ours to watch; return the
			// channel without wiring up the pipe.
			return c.closeNotifyc
		}
		pr, pw := io.Pipe()

		// Swap the switch reader's source for the pipe's read end;
		// the goroutine below relays the real source into the pipe.
		readSource := c.sr.r
		c.sr.Lock()
		c.sr.r = pr
		c.sr.Unlock()
		go func() {
			_, err := io.Copy(pw, readSource)
			if err == nil {
				err = io.EOF
			}
			pw.CloseWithError(err)
			c.noteClientGone()
		}()
	}
	return c.closeNotifyc
}
| |
| func (c *conn) noteClientGone() { |
| c.mu.Lock() |
| defer c.mu.Unlock() |
| if c.closeNotifyc != nil && !c.clientGone { |
| c.closeNotifyc <- true |
| } |
| c.clientGone = true |
| } |
| |
// A switchWriter can have its Writer changed at runtime.
// It's not safe for concurrent Writes and switches.
type switchWriter struct {
	io.Writer // embedded so switchWriter itself is an io.Writer
}
| |
// A liveSwitchReader can have its Reader changed at runtime. It's
// safe for concurrent reads and switches, if its mutex is held.
// closeNotify uses this to splice a pipe into conn's read path.
type liveSwitchReader struct {
	sync.Mutex
	r io.Reader // current source; swap only while holding the mutex
}
| |
| func (sr *liveSwitchReader) Read(p []byte) (n int, err error) { |
| sr.Lock() |
| r := sr.r |
| sr.Unlock() |
| return r.Read(p) |
| } |
| |
// bufferBeforeChunkingSize is how much handler output is buffered
// before committing to chunked encoding (see chunkWriter).
// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048
| |
// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
	res *response // the response this writer serves

	// header is either nil or a deep clone of res.handlerHeader
	// at the time of res.WriteHeader, if res.WriteHeader is
	// called and extra buffering is being done to calculate
	// Content-Type and/or Content-Length.
	header Header

	// wroteHeader tells whether the header's been written to "the
	// wire" (or rather: w.conn.buf). this is unlike
	// (*response).wroteHeader, which tells only whether it was
	// logically written.
	wroteHeader bool

	// set by the writeHeader method:
	chunking bool // using chunked transfer encoding for reply body
}
| |
// Preallocated byte slices for the wire-format separators used when
// writing headers and chunk framing, avoiding per-write conversions.
var (
	crlf       = []byte("\r\n")
	colonSpace = []byte(": ")
)
| |
// Write sends p to the connection buffer, first emitting the response
// header if it hasn't gone out yet, and adds chunked-encoding framing
// (size line before, CRLF after) when in chunking mode. Any write
// error closes the underlying connection.
func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// HEAD responses carry no body: swallow the bytes but report
		// them written so the handler's accounting stays consistent.
		return len(p), nil
	}
	if cw.chunking {
		_, err = fmt.Fprintf(cw.res.conn.buf, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.buf.Write(p)
	if cw.chunking && err == nil {
		_, err = cw.res.conn.buf.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}
| |
// flush forces the header out (finalized with whatever is known so
// far) and flushes the connection's buffered writer.
func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.buf.Flush()
}
| |
// close finishes the response body. In chunking mode it writes the
// terminating zero-length chunk, any declared trailers, and the final
// blank line required by chunked encoding.
func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.buf
		// zero-length chunk marks the end of the body
		bw.WriteString("0\r\n")
		if len(cw.res.trailers) > 0 {
			// Send only trailers the handler actually populated in
			// handlerHeader after declaring them.
			trailers := make(Header)
			for _, h := range cw.res.trailers {
				if vv := cw.res.handlerHeader[h]; len(vv) > 0 {
					trailers[h] = vv
				}
			}
			trailers.Write(bw)
		}
		// final blank line terminates the trailer section
		bw.WriteString("\r\n")
	}
}
| |
// A response represents the server side of an HTTP response.
type response struct {
	conn          *conn
	req           *Request // request for this response
	wroteHeader   bool     // reply header has been (logically) written
	wroteContinue bool     // 100 Continue response was written

	w  *bufio.Writer // buffers output in chunks to chunkWriter
	cw chunkWriter
	sw *switchWriter // of the bufio.Writer, for return to putBufioWriter

	// handlerHeader is the Header that Handlers get access to,
	// which may be retained and mutated even after WriteHeader.
	// handlerHeader is copied into cw.header at WriteHeader
	// time, and privately mutated thereafter.
	handlerHeader Header
	calledHeader  bool // handler accessed handlerHeader via Header

	written       int64 // number of bytes written in body
	contentLength int64 // explicitly-declared Content-Length; or -1
	status        int   // status code passed to WriteHeader

	// close connection after this reply. set on request and
	// updated after response from handler if there's a
	// "Connection: keep-alive" response header and a
	// Content-Length.
	closeAfterReply bool

	// requestBodyLimitHit is set by requestTooLarge when
	// maxBytesReader hits its max size. It is checked in
	// WriteHeader, to make sure we don't consume the
	// remaining request body to try to advance to the next HTTP
	// request. Instead, when this is set, we stop reading
	// subsequent requests on this connection and stop reading
	// input from it.
	requestBodyLimitHit bool

	// trailers are the headers to be sent after the handler
	// finishes writing the body. This field is initialized from
	// the Trailer response header when the response header is
	// written. See declareTrailer.
	trailers []string

	handlerDone bool // set true when the handler exits

	// Buffers for Date and Content-Length, sized so header writing
	// (see extraHeader) need not allocate per response.
	dateBuf [len(TimeFormat)]byte
	clenBuf [10]byte
}
| |
| // declareTrailer is called for each Trailer header when the |
| // response header is written. It notes that a header will need to be |
| // written in the trailers at the end of the response. |
| func (w *response) declareTrailer(k string) { |
| k = CanonicalHeaderKey(k) |
| switch k { |
| case "Transfer-Encoding", "Content-Length", "Trailer": |
| |
| return |
| } |
| w.trailers = append(w.trailers, k) |
| } |
| |
| // requestTooLarge is called by maxBytesReader when too much input has |
| // been read from the client. |
| func (w *response) requestTooLarge() { |
| w.closeAfterReply = true |
| w.requestBodyLimitHit = true |
| if !w.wroteHeader { |
| w.Header().Set("Connection", "close") |
| } |
| } |
| |
| // needsSniff reports whether a Content-Type still needs to be sniffed. |
| func (w *response) needsSniff() bool { |
| _, haveType := w.handlerHeader["Content-Type"] |
| return !w.cw.wroteHeader && !haveType && w.written < sniffLen |
| } |
| |
// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy, so io.Copy won't take the ReaderFrom fast path.
type writerOnly struct {
	io.Writer
}
| |
| func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { |
| switch v := src.(type) { |
| case *os.File: |
| fi, err := v.Stat() |
| if err != nil { |
| return false, err |
| } |
| return fi.Mode().IsRegular(), nil |
| case *io.LimitedReader: |
| return srcIsRegularFile(v.R) |
| default: |
| return |
| } |
| } |
| |
// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	// Fast path only applies when the connection supports ReaderFrom
	// and src is (or wraps) a regular file.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	regFile, err := srcIsRegularFile(src)
	if err != nil {
		return 0, err
	}
	if !ok || !regFile {
		// writerOnly hides w's own ReadFrom so io.Copy doesn't recurse.
		return io.Copy(writerOnly{w}, src)
	}

	// sendfile path: header must be out before bypassing the buffers.
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	if w.needsSniff() {
		// Route the first sniffLen bytes through the normal write path
		// so Content-Type detection still happens.
		n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
		n += n0
		if err != nil {
			return n, err
		}
	}

	w.w.Flush()  // get rid of any previous writes
	w.cw.flush() // make sure Header is written; flush data to rwc

	// Now that cw has been flushed, its chunking field is final.
	if !w.cw.chunking && w.bodyAllowed() {
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}

	// Chunked (or body-less) responses must go through the chunkWriter.
	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}
| |
// noLimit is an effective infinite upper bound for io.LimitedReader
// (math.MaxInt64); readRequest resets conn.lr.N to this after headers.
const noLimit int64 = (1 << 63) - 1
| |
// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper (see newConn). Compile-time switch.
const debugServerConnections = false
| |
// Create new connection from rwc.
//
// Wiring (must match the conn struct's documentation):
// buffered reader -> lr (header size limit) -> sr (switchable) -> rwc,
// and buffered writer -> checkConnErrorWriter -> rwc.
func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
	c = new(conn)
	c.remoteAddr = rwc.RemoteAddr().String()
	c.server = srv
	c.rwc = rwc
	c.w = rwc // keep the raw writer; c.rwc may be wrapped below
	if debugServerConnections {
		c.rwc = newLoggingConn("server", c.rwc)
	}
	c.sr.r = c.rwc
	c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader)
	br := newBufioReader(c.lr)
	bw := newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
	c.buf = bufio.NewReadWriter(br, bw)
	return c, nil
}
| |
// Pools of bufio Readers/Writers reused across connections to reduce
// allocation churn; see newBufioReader/newBufioWriterSize and the
// corresponding put functions.
var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)
| |
| func bufioWriterPool(size int) *sync.Pool { |
| switch size { |
| case 2 << 10: |
| return &bufioWriter2kPool |
| case 4 << 10: |
| return &bufioWriter4kPool |
| } |
| return nil |
| } |
| |
| func newBufioReader(r io.Reader) *bufio.Reader { |
| if v := bufioReaderPool.Get(); v != nil { |
| br := v.(*bufio.Reader) |
| br.Reset(r) |
| return br |
| } |
| |
| return bufio.NewReader(r) |
| } |
| |
// putBufioReader returns br to the pool, first dropping its reference
// to the underlying reader so pooled readers don't pin connections.
func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}
| |
| func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { |
| pool := bufioWriterPool(size) |
| if pool != nil { |
| if v := pool.Get(); v != nil { |
| bw := v.(*bufio.Writer) |
| bw.Reset(w) |
| return bw |
| } |
| } |
| return bufio.NewWriterSize(w, size) |
| } |
| |
// putBufioWriter returns bw to the pool matching its buffer size, if
// any; unpooled sizes are simply dropped for GC. Reset(nil) releases
// the reference to the underlying writer first.
func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	if pool := bufioWriterPool(bw.Available()); pool != nil {
		pool.Put(bw)
	}
}
| |
// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
| |
| func (srv *Server) maxHeaderBytes() int { |
| if srv.MaxHeaderBytes > 0 { |
| return srv.MaxHeaderBytes |
| } |
| return DefaultMaxHeaderBytes |
| } |
| |
// initialLimitedReaderSize is the byte budget given to conn.lr while
// reading a request's headers: the header limit plus slack (4096) for
// the request line itself.
func (srv *Server) initialLimitedReaderSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}
| |
// expectContinueReader is a wrapper around io.ReadCloser which on
// first read sends an HTTP/1.1 100 Continue header to the client.
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser
	closed     bool // Close was called; further Reads fail
	sawEOF     bool // underlying body returned io.EOF
}
| |
// Read writes "100 Continue" to the client the first time the handler
// actually reads from the body (unless the connection was hijacked),
// then delegates to the wrapped body, tracking EOF.
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		// The interim response must be flushed before the client will
		// send the body we are about to read.
		ecr.resp.wroteContinue = true
		ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.buf.Flush()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF = true
	}
	return
}
| |
// Close marks the reader closed (subsequent Reads return
// ErrBodyReadAfterClose) and closes the wrapped body.
func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}
| |
// TimeFormat is the time format to use with
// time.Parse and time.Time.Format when parsing
// or generating times in HTTP headers.
// It is like time.RFC1123 but hard codes GMT as the time zone.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
| |
| // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) |
| func appendTime(b []byte, t time.Time) []byte { |
| const days = "SunMonTueWedThuFriSat" |
| const months = "JanFebMarAprMayJunJulAugSepOctNovDec" |
| |
| t = t.UTC() |
| yy, mm, dd := t.Date() |
| hh, mn, ss := t.Clock() |
| day := days[3*t.Weekday():] |
| mon := months[3*(mm-1):] |
| |
| return append(b, |
| day[0], day[1], day[2], ',', ' ', |
| byte('0'+dd/10), byte('0'+dd%10), ' ', |
| mon[0], mon[1], mon[2], ' ', |
| byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', |
| byte('0'+hh/10), byte('0'+hh%10), ':', |
| byte('0'+mn/10), byte('0'+mn%10), ':', |
| byte('0'+ss/10), byte('0'+ss%10), ' ', |
| 'G', 'M', 'T') |
| } |
| |
// errTooLarge is returned by conn.readRequest when the request headers
// exhaust the connection's limited reader (c.lr.N reaches 0).
var errTooLarge = errors.New("http: request too large")
| |
// Read next request from connection.
//
// It applies the server's read/write deadlines, caps how many bytes
// the header parse may consume, and on success returns a response
// shell wired to this conn, ready for the handler.
func (c *conn) readRequest() (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	if d := c.server.ReadTimeout; d != 0 {
		c.rwc.SetReadDeadline(time.Now().Add(d))
	}
	if d := c.server.WriteTimeout; d != 0 {
		// Set the write deadline only once the request has been read,
		// so the clock starts when we begin producing the response.
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	// Bound how much the header parse may read.
	c.lr.N = c.server.initialLimitedReaderSize()
	if c.lastMethod == "POST" {
		// Tolerate stray CR/LF left over from the previous request's
		// body before the next request line.
		peek, _ := c.buf.Reader.Peek(4)
		c.buf.Reader.Discard(numLeadingCRorLF(peek))
	}
	var req *Request
	if req, err = ReadRequest(c.buf.Reader); err != nil {
		if c.lr.N == 0 {
			// The limit was exhausted mid-headers: report that rather
			// than the parser's error.
			return nil, errTooLarge
		}
		return nil, err
	}
	c.lr.N = noLimit // body reads are not header-limited
	c.lastMethod = req.Method

	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	w = &response{
		conn:          c,
		req:           req,
		handlerHeader: make(Header),
		contentLength: -1, // unknown until WriteHeader/writeHeader
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}
| |
// Header returns the header map the handler may mutate. If the header
// was already logically written (WriteHeader) but hasn't hit the wire
// yet, the map is cloned into cw.header first so later handler
// mutations can't change what gets sent.
func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}
| |
// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this then the server to be paranoid instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10
| |
// WriteHeader logically writes the response header with the given
// status code. Only the first call takes effect; the header is not
// sent to the wire here — chunkWriter.writeHeader does that later.
// A valid Content-Length header, if present, is parsed and recorded.
func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		w.conn.server.logf("http: response.WriteHeader on hijacked connection")
		return
	}
	if w.wroteHeader {
		w.conn.server.logf("http: multiple response.WriteHeader calls")
		return
	}
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		// Snapshot the header now so later handler mutations don't
		// affect what is sent (see Header).
		w.cw.header = w.handlerHeader.clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			// Malformed or negative: log and drop the header rather
			// than send it.
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}
| |
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}
| |
// extraHeaderKeys are the wire-format key names for the string fields
// of extraHeader. Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}
| |
// Preallocated key prefixes for the two byte-slice fields of
// extraHeader, written directly without per-response conversions.
var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)
| |
| // Write writes the headers described in h to w. |
| // |
| // This method has a value receiver, despite the somewhat large size |
| // of h, because it prevents an allocation. The escape analysis isn't |
| // smart enough to realize this function doesn't mutate h. |
| func (h extraHeader) Write(w *bufio.Writer) { |
| if h.date != nil { |
| w.Write(headerDate) |
| w.Write(h.date) |
| w.Write(crlf) |
| } |
| if h.contentLength != nil { |
| w.Write(headerContentLength) |
| w.Write(h.contentLength) |
| w.Write(crlf) |
| } |
| for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { |
| if v != "" { |
| w.Write(extraHeaderKeys[i]) |
| w.Write(colonSpace) |
| w.WriteString(v) |
| w.Write(crlf) |
| } |
| } |
| } |
| |
// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.buf.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is either the snapshot taken at WriteHeader time, or —
	// when no snapshot was needed — the live handler header. In the
	// latter case we may not mutate it (the handler could still be
	// using it for trailers), so deletions go through excludeHeader.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Record any declared trailers (from the snapshot only).
	trailers := false
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done and the whole body fit in the buffer (it
	// is all in p), we can send an exact Content-Length instead of
	// chunking. (HEAD with an empty p is excluded: len(p) == 0 there
	// does not prove the body is empty.)
	if w.handlerDone && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// HTTP/1.0 keep-alive requires an explicit Content-Length and
	// Connection: keep-alive from the handler.
	if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	hasCL := w.contentLength != -1

	if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// An Expect: 100-continue body that was never fully read cannot be
	// safely drained; close after replying.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the user hasn't already done so — but only up to a
	// sanity limit (maxPostHandlerReadBytes); beyond that we close.
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed before EOF; can't trust the
					// connection framing anymore.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			_, err := io.CopyN(io.Discard, w.req.Body, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// Copied the full limit+1 bytes without hitting EOF:
				// the remainder is too big to drain.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body was closed elsewhere; nothing left to drain.
			case io.EOF:
				// Drained cleanly; close the body to release it.
				err = w.req.Body.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other read error; don't reuse the connection.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// Sniff a Content-Type from the first chunk if the handler
		// set neither a type nor a Transfer-Encoding.
		_, haveType := header["Content-Type"]
		if !haveType && !hasTE {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if _, ok := header["Date"]; !ok {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// Conflicting framing; Transfer-Encoding wins, drop the length.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// No body will be sent; no framing decision needed.
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 with unknown length: chunk, unless the handler
		// explicitly asked for identity (then close delimits the body).
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// Default to chunked encoding.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
		}
	} else {
		// HTTP/1.0 with unknown length: close delimits the body.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding")
	}

	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	w.conn.buf.WriteString(statusLine(w.req, code))
	cw.header.WriteSubset(w.conn.buf, excludeHeader)
	setHeader.Write(w.conn.buf.Writer)
	w.conn.buf.Write(crlf)
}
| |
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty,
// whitespace-trimmed element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	if len(v) == 0 {
		return
	}
	if !strings.Contains(v, ",") {
		// Common single-element case: no splitting needed.
		fn(v)
		return
	}
	for _, elem := range strings.Split(v, ",") {
		elem = textproto.TrimString(elem)
		if elem != "" {
			fn(elem)
		}
	}
}
| |
// statusLines is a cache of Status-Line strings, keyed by code (for
// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a
// map keyed by struct of two fields. This map's max size is bounded
// by 2*len(statusText), two protocol types for each known official
// status code in the statusText map.
var (
	statusMu    sync.RWMutex // guards statusLines
	statusLines = make(map[int]string)
)
| |
| // statusLine returns a response Status-Line (RFC 2616 Section 6.1) |
| // for the given request and response status code. |
| func statusLine(req *Request, code int) string { |
| |
| key := code |
| proto11 := req.ProtoAtLeast(1, 1) |
| if !proto11 { |
| key = -key |
| } |
| statusMu.RLock() |
| line, ok := statusLines[key] |
| statusMu.RUnlock() |
| if ok { |
| return line |
| } |
| |
| proto := "HTTP/1.0" |
| if proto11 { |
| proto = "HTTP/1.1" |
| } |
| codestring := strconv.Itoa(code) |
| text, ok := statusText[code] |
| if !ok { |
| text = "status code " + codestring |
| } |
| line = proto + " " + codestring + " " + text + "\r\n" |
| if ok { |
| statusMu.Lock() |
| defer statusMu.Unlock() |
| statusLines[key] = line |
| } |
| return line |
| } |
| |
| // bodyAllowed reports whether a Write is allowed for this response type. |
| // It's illegal to call this before the header has been flushed. |
| func (w *response) bodyAllowed() bool { |
| if !w.wroteHeader { |
| panic("") |
| } |
| return bodyAllowedForStatus(w.status) |
| } |
| |
| // The Life Of A Write is like this: |
| // |
| // Handler starts. No header has been sent. The handler can either |
| // write a header, or just start writing. Writing before sending a header |
| // sends an implicitly empty 200 OK header. |
| // |
| // If the handler didn't declare a Content-Length up front, we either |
| // go into chunking mode or, if the handler finishes running before |
| // the chunking buffer size, we compute a Content-Length and send that |
| // in the header instead. |
| // |
| // Likewise, if the handler didn't set a Content-Type, we sniff that |
| // from the initial chunk of output. |
| // |
| // The Writers are wired together like: |
| // |
| // 1. *response (the ResponseWriter) -> |
| // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes |
| // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) |
| // and which writes the chunk headers, if needed. |
| // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> |
| // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write |
| // and populates c.werr with it if so. but otherwise writes to: |
| // 6. the rwc, the net.Conn. |
| // |
| // TODO(bradfitz): short-circuit some of the buffering when the |
| // initial header contains both a Content-Type and Content-Length. |
| // Also short-circuit in (1) when the header's been sent and not in |
| // chunking mode, writing directly to (4) instead, if (2) has no |
| // buffered data. More generally, we could short-circuit from (1) to |
| // (3) even in chunking mode if the write size from (1) is over some |
| // threshold and nothing is in (2). The answer might be mostly making |
| // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal |
| // with this instead. |
// Write implements io.Writer for the response; see the "Life Of A
// Write" comment above for the full buffering pipeline.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}
| |
// WriteString is the string variant of Write; it passes the string
// through to w.write without converting it to a []byte.
func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}
| |
// write is the common implementation of Write and WriteString.
// Either dataB or dataS is non-zero (carries the payload); lenData is
// its length. It lazily writes the header, rejects bodies that are not
// allowed, and enforces any declared Content-Length.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		w.conn.server.logf("http: response.Write on hijacked connection")
		return 0, ErrHijacked
	}
	if !w.wroteHeader {
		// First body write implies a 200 OK header.
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	// Counted before the write below, even if it fails.
	w.written += int64(lenData)
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}
| |
// finishRequest completes the response after the handler returns:
// it flushes the write pipeline in order (handler buffer, chunk
// terminator/trailers, connection buffer), then releases the request's
// resources. The ordering matters — each stage feeds the next.
func (w *response) finishRequest() {
	w.handlerDone = true

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	w.w.Flush()          // drain the handler's buffer into the chunkWriter
	putBufioWriter(w.w)  // return the per-response buffer to its pool
	w.cw.close()         // write the final chunk and trailers, if chunking
	w.conn.buf.Flush()   // push everything to the connection

	// Close the body (which we only did not do earlier if
	// the handler hijacked or something); best-effort.
	w.req.Body.Close()

	if w.req.MultipartForm != nil {
		// Remove any temp files created while parsing multipart data.
		w.req.MultipartForm.RemoveAll()
	}
}
| |
// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
	if w.closeAfterReply {
		// The request or header wrote "Connection: close";
		// the connection is in an unusable state.
		return false
	}

	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
		// The handler wrote fewer (or more) bytes than declared;
		// the client's framing would be out of sync.
		return false
	}

	// A write error means the connection is broken.
	if w.conn.werr != nil {
		return false
	}

	// The request body was closed before being fully consumed, so the
	// remaining bytes can't be skipped reliably.
	if w.closedRequestBodyEarly() {
		return false
	}

	return true
}
| |
| func (w *response) closedRequestBodyEarly() bool { |
| body, ok := w.req.Body.(*body) |
| return ok && body.didEarlyClose() |
| } |
| |
// Flush sends any buffered response data to the client, writing the
// header (with StatusOK) first if it has not been sent yet.
func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	// Flush both buffering layers, innermost first.
	w.w.Flush()
	w.cw.flush()
}
| |
// finalFlush flushes any remaining buffered output and returns the
// connection's bufio.Reader and bufio.Writer to their pools for reuse
// by a future connection.
func (c *conn) finalFlush() {
	if c.buf != nil {
		c.buf.Flush()

		// Return the read buffer to its pool.
		putBufioReader(c.buf.Reader)

		// Return the write buffer to its pool.
		putBufioWriter(c.buf.Writer)

		c.buf = nil
	}
}

// Close the connection.
func (c *conn) close() {
	c.finalFlush()
	if c.rwc != nil {
		c.rwc.Close()
		c.rwc = nil // prevent a double Close
	}
}
| |
// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

// closeWriter is implemented by connections that can close just their
// write side (half-close), such as *net.TCPConn.
type closeWriter interface {
	CloseWrite() error
}

// Compile-time check that *net.TCPConn supports half-close.
var _ closeWriter = (*net.TCPConn)(nil)
| |
// closeWriteAndWait flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	// Non-TCP connections may not support half-close; skip the FIN then.
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}
| |
// validNPN reports whether proto is an acceptable Next Protocol
// Negotiation value for dispatch to a user-registered handler. The
// empty string and the built-in HTTP/1.x protocol names are reserved
// and cannot be overridden with alternate implementations.
func validNPN(proto string) bool {
	return proto != "" && proto != "http/1.1" && proto != "http/1.0"
}
| |
// setState invokes the server's optional ConnState hook for nc with the
// given state. It is a no-op when no hook is configured.
func (c *conn) setState(nc net.Conn, state ConnState) {
	if hook := c.server.ConnState; hook != nil {
		hook(nc, state)
	}
}
| |
// Serve a new connection.
//
// serve runs the read-request/dispatch/finish loop for one connection:
// TLS setup (including NPN protocol hand-off), panic recovery, the
// Expect: 100-continue dance, and keep-alive reuse decisions.
func (c *conn) serve() {
	origConn := c.rwc // copy before it may be set to nil by close or hijack
	defer func() {
		if err := recover(); err != nil {
			// A handler panicked: log the value plus a stack trace,
			// but keep the server itself running.
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
		}
		if !c.hijacked() {
			// Hijacked connections belong to their handler now.
			c.close()
			c.setState(origConn, StateClosed)
		}
	}()

	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
		if d := c.server.ReadTimeout; d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
		}
		if d := c.server.WriteTimeout; d != 0 {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}
		if err := tlsConn.Handshake(); err != nil {
			c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
			return
		}
		c.tlsState = new(tls.ConnectionState)
		*c.tlsState = tlsConn.ConnectionState()
		if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
			// A non-HTTP/1.x protocol was negotiated; hand the whole
			// connection to the registered protocol handler, if any.
			if fn := c.server.TLSNextProto[proto]; fn != nil {
				h := initNPNRequest{tlsConn, serverHandler{c.server}}
				fn(c.server, tlsConn, h)
			}
			return
		}
	}

	for {
		w, err := c.readRequest()
		if c.lr.N != c.server.initialLimitedReaderSize() {
			// If we read any bytes off the wire, we're active.
			c.setState(c.rwc, StateActive)
		}
		if err != nil {
			if err == errTooLarge {
				// Respond with 413 then half-close; the client may
				// still be mid-write of its oversized request.
				io.WriteString(c.rwc, "HTTP/1.1 413 Request Entity Too Large\r\n\r\n")
				c.closeWriteAndWait()
				break
			} else if err == io.EOF {
				break // client closed; don't reply
			} else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
				break // read timed out; don't reply
			}
			io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\n\r\n")
			break
		}

		// Expect: 100-continue support.
		req := w.req
		if req.expectsContinue() {
			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
				// Wrap the body so the 100 Continue is sent lazily,
				// only when the handler first reads from it.
				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
			}
			req.Header.Del("Expect")
		} else if req.Header.get("Expect") != "" {
			w.sendExpectationFailed()
			break
		}

		serverHandler{c.server}.ServeHTTP(w, w.req)
		if c.hijacked() {
			// The handler owns the connection from here on.
			return
		}
		w.finishRequest()
		if !w.shouldReuseConnection() {
			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
				// Half-close and pause so the client sees our reply
				// before any RST from its unread request data.
				c.closeWriteAndWait()
			}
			break
		}
		c.setState(c.rwc, StateIdle)
	}
}
| |
// sendExpectationFailed replies with 417 Expectation Failed to a request
// carrying an unsupported Expect header, and forces the connection closed.
func (w *response) sendExpectationFailed() {
	w.Header().Set("Connection", "close")
	w.WriteHeader(StatusExpectationFailed)
	w.finishRequest()
}
| |
// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if w.wroteHeader {
		// Push already-buffered response bytes out before handing over.
		w.cw.flush()
	}

	rwc, buf, err = w.conn.hijack()
	if err == nil {
		// The response's bufio.Writer is unused after a hijack;
		// return it to the pool.
		putBufioWriter(w.w)
		w.w = nil
	}
	return rwc, buf, err
}

// CloseNotify implements the CloseNotifier interface, reporting when the
// underlying connection goes away.
func (w *response) CloseNotify() <-chan bool {
	return w.conn.closeNotify()
}
| |
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Request)

// ServeHTTP implements Handler by calling f(w, r).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
	f(w, r)
}
| |
| // Error replies to the request with the specified error message and HTTP code. |
| // The error message should be plain text. |
| func Error(w ResponseWriter, error string, code int) { |
| w.Header().Set("Content-Type", "text/plain; charset=utf-8") |
| w.Header().Set("X-Content-Type-Options", "nosniff") |
| w.WriteHeader(code) |
| fmt.Fprintln(w, error) |
| } |
| |
// NotFound replies to the request with an HTTP 404 not found error.
// The body is a short plain-text message written via Error.
func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }

// NotFoundHandler returns a simple request handler
// that replies to each request with a ` + "`" + `` + "`" + `404 page not found'' reply.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
| |
| // StripPrefix returns a handler that serves HTTP requests |
| // by removing the given prefix from the request URL's Path |
| // and invoking the handler h. StripPrefix handles a |
| // request for a path that doesn't begin with prefix by |
| // replying with an HTTP 404 not found error. |
| func StripPrefix(prefix string, h Handler) Handler { |
| if prefix == "" { |
| return h |
| } |
| return HandlerFunc(func(w ResponseWriter, r *Request) { |
| if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { |
| r.URL.Path = p |
| h.ServeHTTP(w, r) |
| } else { |
| NotFound(w, r) |
| } |
| }) |
| } |
| |
// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
//
// The provided code should be in the 3xx range, typically
// StatusMovedPermanently, StatusFound, or StatusSeeOther.
func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
	if u, err := url.Parse(urlStr); err == nil {
		// If url was relative, make it absolute by combining it with
		// the request path. The browser would likely do this for us,
		// but doing it ourselves is more reliable.
		oldpath := r.URL.Path
		if oldpath == "" { // should not happen, but avoid a crash if it does
			oldpath = "/"
		}
		if u.Scheme == "" {
			// No scheme/host part; urlStr is a path.
			if urlStr == "" || urlStr[0] != '/' {
				// Make the relative path absolute against the request's directory.
				olddir, _ := path.Split(oldpath)
				urlStr = olddir + urlStr
			}

			// Split off the query string so path.Clean doesn't mangle it.
			var query string
			if i := strings.Index(urlStr, "?"); i != -1 {
				urlStr, query = urlStr[:i], urlStr[i:]
			}

			// Clean the path but preserve any trailing slash,
			// which path.Clean removes.
			trailing := strings.HasSuffix(urlStr, "/")
			urlStr = path.Clean(urlStr)
			if trailing && !strings.HasSuffix(urlStr, "/") {
				urlStr += "/"
			}
			urlStr += query
		}
	}

	w.Header().Set("Location", urlStr)
	w.WriteHeader(code)

	// Write a short HTML note for GET requests only: older user agents
	// may not follow the redirect, and POST/HEAD shouldn't get a body.
	if r.Method == "GET" {
		note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
		fmt.Fprintln(w, note)
	}
}
| |
| var htmlReplacer = strings.NewReplacer( |
| "&", "&", |
| "<", "<", |
| ">", ">", |
| |
| ` + "`" + `"` + "`" + `, """, |
| |
| "'", "'", |
| ) |
| |
| func htmlEscape(s string) string { |
| return htmlReplacer.Replace(s) |
| } |
| |
// redirectHandler redirects every request to a fixed URL with a fixed
// status code.
type redirectHandler struct {
	url  string // target URL for the Location header
	code int    // HTTP status code to send (3xx)
}

// ServeHTTP replies to every request with the configured redirect.
func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
	Redirect(w, r, rh.url, rh.code)
}

// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
func RedirectHandler(url string, code int) Handler {
	return &redirectHandler{url, code}
}
| |
// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
//
// Patterns may optionally begin with a host name, restricting matches to
// URLs on that host only. Host-specific patterns take precedence over
// general patterns, so that a handler might register for the two patterns
// "/codesearch" and "codesearch.google.com/" without also taking over
// requests for "http://www.google.com/".
//
// ServeMux also takes care of sanitizing the URL request path,
// redirecting any request containing . or .. elements to an
// equivalent .- and ..-free URL.
type ServeMux struct {
	mu    sync.RWMutex // guards m and hosts
	m     map[string]muxEntry
	hosts bool // whether any patterns contain hostnames
}

// muxEntry is one registration in a ServeMux's pattern map.
type muxEntry struct {
	explicit bool // registered via Handle, not an implied /tree -> /tree/ redirect
	h        Handler
	pattern  string // pattern as originally registered
}

// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} }

// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
| |
// pathMatch reports whether path is matched by the registered pattern.
// A pattern ending in '/' matches every path it prefixes (a rooted
// subtree); any other pattern matches only the identical path.
func pathMatch(pattern, path string) bool {
	if pattern == "" {
		// Should not happen: Handle rejects empty patterns.
		return false
	}
	if !strings.HasSuffix(pattern, "/") {
		return pattern == path
	}
	return strings.HasPrefix(path, pattern)
}
| |
// cleanPath returns the canonical path for p, eliminating . and ..
// elements and guaranteeing a leading slash. A trailing slash is
// preserved (path.Clean strips it), since patterns distinguish
// "/dir" from "/dir/".
func cleanPath(p string) string {
	switch {
	case p == "":
		return "/"
	case p[0] != '/':
		p = "/" + p
	}
	np := path.Clean(p)
	// Restore the trailing slash that path.Clean removed, except at root.
	if strings.HasSuffix(p, "/") && np != "/" {
		np += "/"
	}
	return np
}
| |
// match finds the handler registered for path, if any.
// The most-specific (longest) matching pattern wins.
// Callers must hold mux.mu (at least for reading).
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
	var n = 0 // length of the longest matching pattern seen so far
	for k, v := range mux.m {
		if !pathMatch(k, path) {
			continue
		}
		if h == nil || len(k) > n {
			// First match, or a longer (more specific) one.
			n = len(k)
			h = v.h
			pattern = v.pattern
		}
	}
	return
}
| |
// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ` + "`" + `` + "`" + `page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
	// CONNECT request paths are not canonicalized.
	if r.Method != "CONNECT" {
		if p := cleanPath(r.URL.Path); p != r.URL.Path {
			// Non-canonical path: redirect to the cleaned version,
			// reporting the pattern that will match afterwards.
			_, pattern = mux.handler(r.Host, p)
			url := *r.URL
			url.Path = p
			return RedirectHandler(url.String(), StatusMovedPermanently), pattern
		}
	}

	return mux.handler(r.Host, r.URL.Path)
}
| |
// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
	mux.mu.RLock()
	defer mux.mu.RUnlock()

	// Host-specific patterns take precedence over generic ones.
	if mux.hosts {
		h, pattern = mux.match(host + path)
	}
	if h == nil {
		h, pattern = mux.match(path)
	}
	if h == nil {
		// No registration applies; fall back to 404.
		h, pattern = NotFoundHandler(), ""
	}
	return
}
| |
// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
	if r.RequestURI == "*" {
		// "*" request targets are not routable paths; reject with 400
		// and, for HTTP/1.1 clients, ask them to close the connection.
		if r.ProtoAtLeast(1, 1) {
			w.Header().Set("Connection", "close")
		}
		w.WriteHeader(StatusBadRequest)
		return
	}
	h, _ := mux.Handler(r)
	h.ServeHTTP(w, r)
}
| |
// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
	mux.mu.Lock()
	defer mux.mu.Unlock()

	if pattern == "" {
		panic("http: invalid pattern " + pattern)
	}
	if handler == nil {
		panic("http: nil handler")
	}
	if mux.m[pattern].explicit {
		panic("http: multiple registrations for " + pattern)
	}

	mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern}

	// A pattern not starting with '/' begins with a host name.
	if pattern[0] != '/' {
		mux.hosts = true
	}

	// Helpful behavior: if pattern is /tree/, insert an implicit
	// permanent redirect for /tree -> /tree/. It can be overridden
	// by a later explicit registration.
	n := len(pattern)
	if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit {
		// Strip any host prefix so the redirect target is a bare path.
		path := pattern
		if pattern[0] != '/' {
			// pattern ends in '/', so strings.Index cannot return -1 here.
			path = pattern[strings.Index(pattern, "/"):]
		}
		url := &url.URL{Path: path}
		mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern}
	}
}
| |
// HandleFunc registers the handler function for the given pattern.
// It is shorthand for Handle(pattern, HandlerFunc(handler)).
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	mux.Handle(pattern, HandlerFunc(handler))
}

// Handle registers the handler for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }

// HandleFunc registers the handler function for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	DefaultServeMux.HandleFunc(pattern, handler)
}
| |
// Serve accepts incoming HTTP connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
// Handler is typically nil, in which case the DefaultServeMux is used.
func Serve(l net.Listener, handler Handler) error {
	srv := &Server{Handler: handler}
	return srv.Serve(l)
}
| |
// A Server defines parameters for running an HTTP server.
// The zero value for Server is a valid configuration.
type Server struct {
	Addr           string        // TCP address to listen on, ":http" if empty
	Handler        Handler       // handler to invoke, http.DefaultServeMux if nil
	ReadTimeout    time.Duration // maximum duration before timing out read of the request
	WriteTimeout   time.Duration // maximum duration before timing out write of the response
	MaxHeaderBytes int           // maximum size of request headers, DefaultMaxHeaderBytes if 0
	TLSConfig      *tls.Config   // optional TLS config, used by ListenAndServeTLS

	// TLSNextProto optionally specifies a function to take over
	// ownership of the provided TLS connection when an NPN
	// protocol upgrade has occurred. The map key is the protocol
	// name negotiated. The Handler argument should be used to
	// handle HTTP requests and will initialize the Request's TLS
	// and RemoteAddr if not already set. The connection is
	// automatically closed when the function returns.
	TLSNextProto map[string]func(*Server, *tls.Conn, Handler)

	// ConnState specifies an optional callback function that is
	// called when a client connection changes state. See the
	// ConnState type and associated constants for details.
	ConnState func(net.Conn, ConnState)

	// ErrorLog specifies an optional logger for errors accepting
	// connections and unexpected behavior from handlers.
	// If nil, logging goes to os.Stderr via the log package's
	// standard logger.
	ErrorLog *log.Logger

	disableKeepAlives int32 // accessed atomically; toggled via SetKeepAlivesEnabled
}
| |
// A ConnState represents the state of a client connection to a server.
// It's used by the optional Server.ConnState hook.
type ConnState int

const (
	// StateNew represents a new connection that is expected to
	// send a request immediately. Connections begin at this
	// state and then transition to either StateActive or
	// StateClosed.
	StateNew ConnState = iota

	// StateActive represents a connection that has read 1 or more
	// bytes of a request. The Server.ConnState hook for
	// StateActive fires before the request has entered a handler
	// and doesn't fire again until the request has been
	// handled. After the request is handled, the state
	// transitions to StateClosed, StateHijacked, or StateIdle.
	StateActive

	// StateIdle represents a connection that has finished
	// handling a request and is in the keep-alive state, waiting
	// for a new request. Connections transition from StateIdle
	// to either StateActive or StateClosed.
	StateIdle

	// StateHijacked represents a hijacked connection.
	// This is a terminal state. It does not transition to StateClosed.
	StateHijacked

	// StateClosed represents a closed connection.
	// This is a terminal state. Hijacked connections do not
	// transition to StateClosed.
	StateClosed
)

// stateName maps each ConnState to its human-readable name.
var stateName = map[ConnState]string{
	StateNew:      "new",
	StateActive:   "active",
	StateIdle:     "idle",
	StateHijacked: "hijacked",
	StateClosed:   "closed",
}

// String returns the lowercase name of the connection state.
func (c ConnState) String() string {
	return stateName[c]
}
| |
// serverHandler delegates to either the server's Handler or
// DefaultServeMux and also handles "OPTIONS *" requests.
type serverHandler struct {
	srv *Server
}

// ServeHTTP picks the effective handler for req and invokes it.
func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
	handler := sh.srv.Handler
	if handler == nil {
		handler = DefaultServeMux
	}
	// "OPTIONS *" is server-wide, not routed through the mux.
	if req.RequestURI == "*" && req.Method == "OPTIONS" {
		handler = globalOptionsHandler{}
	}
	handler.ServeHTTP(rw, req)
}
| |
// ListenAndServe listens on the TCP network address srv.Addr and then
// calls Serve to handle requests on incoming connections. If
// srv.Addr is blank, ":http" is used.
func (srv *Server) ListenAndServe() error {
	addr := srv.Addr
	if addr == "" {
		addr = ":http"
	}
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	// Wrap the listener so accepted connections get TCP keep-alives,
	// letting dead peers eventually be detected.
	return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)})
}
| |
// Serve accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
func (srv *Server) Serve(l net.Listener) error {
	defer l.Close()
	var tempDelay time.Duration // how long to sleep on accept failure
	for {
		rw, e := l.Accept()
		if e != nil {
			if ne, ok := e.(net.Error); ok && ne.Temporary() {
				// Temporary error (e.g. out of file descriptors):
				// back off exponentially, capped at 1s, and retry.
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			// Permanent error: stop serving.
			return e
		}
		tempDelay = 0
		c, err := srv.newConn(rw)
		if err != nil {
			continue
		}
		c.setState(c.rwc, StateNew) // before Serve can return
		go c.serve()
	}
}
| |
// doKeepAlives reports whether keep-alives are currently enabled.
func (s *Server) doKeepAlives() bool {
	return atomic.LoadInt32(&s.disableKeepAlives) == 0
}

// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
func (srv *Server) SetKeepAlivesEnabled(v bool) {
	if v {
		atomic.StoreInt32(&srv.disableKeepAlives, 0)
	} else {
		atomic.StoreInt32(&srv.disableKeepAlives, 1)
	}
}

// logf logs to srv.ErrorLog when set, otherwise to the log package's
// standard logger.
func (s *Server) logf(format string, args ...interface{}) {
	if s.ErrorLog != nil {
		s.ErrorLog.Printf(format, args...)
	} else {
		log.Printf(format, args...)
	}
}
| |
// ListenAndServe listens on the TCP network address addr
// and then calls Serve with handler to handle requests
// on incoming connections. Handler is typically nil,
// in which case the DefaultServeMux is used.
//
// A trivial example server is:
//
//	package main
//
//	import (
//		"io"
//		"net/http"
//		"log"
//	)
//
//	// hello world, the web server
//	func HelloServer(w http.ResponseWriter, req *http.Request) {
//		io.WriteString(w, "hello, world!\n")
//	}
//
//	func main() {
//		http.HandleFunc("/hello", HelloServer)
//		err := http.ListenAndServe(":12345", nil)
//		if err != nil {
//			log.Fatal("ListenAndServe: ", err)
//		}
//	}
func ListenAndServe(addr string, handler Handler) error {
	// Build a zero-default Server and delegate to its ListenAndServe.
	server := &Server{Addr: addr, Handler: handler}
	return server.ListenAndServe()
}
| |
// ListenAndServeTLS acts identically to ListenAndServe, except that it
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
//
// A trivial example server is:
//
//	import (
//		"log"
//		"net/http"
//	)
//
//	func handler(w http.ResponseWriter, req *http.Request) {
//		w.Header().Set("Content-Type", "text/plain")
//		w.Write([]byte("This is an example server.\n"))
//	}
//
//	func main() {
//		http.HandleFunc("/", handler)
//		log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/")
//		err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil)
//		if err != nil {
//			log.Fatal(err)
//		}
//	}
//
// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error {
	// Build a zero-default Server and delegate to its ListenAndServeTLS.
	server := &Server{Addr: addr, Handler: handler}
	return server.ListenAndServeTLS(certFile, keyFile)
}
| |
// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls Serve to handle requests on incoming TLS connections.
//
// Filenames containing a certificate and matching private key for the
// server must be provided if the Server's TLSConfig.Certificates is
// not populated. If the certificate is signed by a certificate
// authority, the certFile should be the concatenation of the server's
// certificate, any intermediates, and the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
	addr := srv.Addr
	if addr == "" {
		addr = ":https"
	}
	// Clone the config so mutations below don't affect srv.TLSConfig.
	config := cloneTLSConfig(srv.TLSConfig)
	if config.NextProtos == nil {
		config.NextProtos = []string{"http/1.1"}
	}

	// Load the key pair from disk unless the config already carries
	// certificates and no files were supplied.
	if len(config.Certificates) == 0 || certFile != "" || keyFile != "" {
		var err error
		config.Certificates = make([]tls.Certificate, 1)
		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return err
		}
	}

	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	// Keep-alive wrapping first, then TLS on top.
	tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config)
	return srv.Serve(tlsListener)
}
| |
// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
	f := func() <-chan time.Time {
		return time.After(dt)
	}
	return &timeoutHandler{h, f, msg}
}

// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")

// timeoutHandler wraps a Handler, enforcing a per-request deadline.
type timeoutHandler struct {
	handler Handler
	timeout func() <-chan time.Time // returns channel producing a timeout
	body    string                  // custom timeout message, "" for default
}

// errorBody returns the body to send on timeout: the configured message,
// or a default HTML page when none was given.
func (h *timeoutHandler) errorBody() string {
	if h.body != "" {
		return h.body
	}
	return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
}
| |
// ServeHTTP runs the wrapped handler in a goroutine and races it
// against the timeout channel. On timeout it sends a 503 (unless the
// handler already wrote a header) and marks tw so later handler writes
// fail with ErrHandlerTimeout.
func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
	// Buffered so the handler goroutine can finish (and not leak)
	// even after we've returned on timeout.
	done := make(chan bool, 1)
	tw := &timeoutWriter{w: w}
	go func() {
		h.handler.ServeHTTP(tw, r)
		done <- true
	}()
	select {
	case <-done:
		return
	case <-h.timeout():
		// Lock out concurrent writes from the handler goroutine while
		// we decide whether the 503 can still be sent.
		tw.mu.Lock()
		defer tw.mu.Unlock()
		if !tw.wroteHeader {
			tw.w.WriteHeader(StatusServiceUnavailable)
			tw.w.Write([]byte(h.errorBody()))
		}
		tw.timedOut = true
	}
}
| |
// timeoutWriter wraps a ResponseWriter for use under TimeoutHandler,
// rejecting writes that happen after the deadline has fired.
type timeoutWriter struct {
	w ResponseWriter

	mu          sync.Mutex // guards timedOut and wroteHeader
	timedOut    bool       // set once the timeout response has been sent
	wroteHeader bool       // set once the handler has (implicitly) written a header
}

// Header returns the underlying writer's header map.
func (tw *timeoutWriter) Header() Header {
	return tw.w.Header()
}

// Write forwards body bytes unless the request already timed out,
// in which case it returns ErrHandlerTimeout.
func (tw *timeoutWriter) Write(p []byte) (int, error) {
	tw.mu.Lock()
	defer tw.mu.Unlock()
	// A body write implies the header; record that before checking the
	// timeout so a concurrent timeout won't also send a 503.
	tw.wroteHeader = true
	if tw.timedOut {
		return 0, ErrHandlerTimeout
	}
	return tw.w.Write(p)
}

// WriteHeader forwards the status code unless the request timed out or
// a header was already written; then it is a no-op.
func (tw *timeoutWriter) WriteHeader(code int) {
	tw.mu.Lock()
	defer tw.mu.Unlock()
	if tw.timedOut || tw.wroteHeader {
		return
	}
	tw.wroteHeader = true
	tw.w.WriteHeader(code)
}
| |
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
type tcpKeepAliveListener struct {
	*net.TCPListener
}

// Accept accepts the next TCP connection and enables keep-alive probes
// on it with a 3-minute period before returning it.
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
	tc, err := ln.AcceptTCP()
	if err != nil {
		return
	}
	tc.SetKeepAlive(true)
	tc.SetKeepAlivePeriod(3 * time.Minute)
	return tc, nil
}
| |
// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}

// ServeHTTP replies with an empty body, draining (a bounded amount of)
// any request body so the connection stays usable.
func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Content-Length", "0")
	if r.ContentLength != 0 {
		// Read up to 4KB of OPTIONS body; anything beyond that is
		// treated as abuse and aborted via MaxBytesReader's limit.
		mb := MaxBytesReader(w, r.Body, 4<<10)
		io.Copy(io.Discard, mb)
	}
}
| |
// eofReaderWithWriteTo is an always-EOF reader. Its no-op WriteTo lets
// io.Copy finish without allocating a buffer.
type eofReaderWithWriteTo struct{}

// WriteTo reports that nothing was written, with no error.
func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil }

// Read always reports EOF.
func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF }

// eofReader is a non-nil io.ReadCloser that always returns EOF.
// It has a WriteTo method so io.Copy won't need a buffer.
var eofReader = &struct {
	eofReaderWithWriteTo
	io.Closer
}{
	eofReaderWithWriteTo{},
	io.NopCloser(nil),
}

// Verify that an io.Copy from an eofReader won't require a buffer.
var _ io.WriterTo = eofReader
| |
// initNPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from NPN protocol handlers.
type initNPNRequest struct {
	c *tls.Conn
	h serverHandler
}

// ServeHTTP fills in TLS state, a non-nil Body, and RemoteAddr if they
// are unset, then delegates to the wrapped serverHandler.
func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
	if req.TLS == nil {
		req.TLS = &tls.ConnectionState{}
		*req.TLS = h.c.ConnectionState()
	}
	if req.Body == nil {
		req.Body = eofReader
	}
	if req.RemoteAddr == "" {
		req.RemoteAddr = h.c.RemoteAddr().String()
	}
	h.h.ServeHTTP(rw, req)
}
| |
// loggingConn is used for debugging. It wraps a net.Conn and logs
// every Read, Write, and Close with a per-connection name.
type loggingConn struct {
	name string
	net.Conn
}

var (
	uniqNameMu   sync.Mutex     // guards uniqNameNext
	uniqNameNext = make(map[string]int) // next sequence number per base name
)

// newLoggingConn wraps c in a loggingConn with a unique name derived
// from baseName and a process-wide counter.
func newLoggingConn(baseName string, c net.Conn) net.Conn {
	uniqNameMu.Lock()
	defer uniqNameMu.Unlock()
	uniqNameNext[baseName]++
	return &loggingConn{
		name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
		Conn: c,
	}
}
| |
// Write logs before and after delegating to the wrapped connection.
func (c *loggingConn) Write(p []byte) (n int, err error) {
	log.Printf("%s.Write(%d) = ....", c.name, len(p))
	n, err = c.Conn.Write(p)
	log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
	return
}

// Read logs before and after delegating to the wrapped connection.
func (c *loggingConn) Read(p []byte) (n int, err error) {
	log.Printf("%s.Read(%d) = ....", c.name, len(p))
	n, err = c.Conn.Read(p)
	log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
	return
}

// Close logs before and after delegating to the wrapped connection.
func (c *loggingConn) Close() (err error) {
	log.Printf("%s.Close() = ...", c.name)
	err = c.Conn.Close()
	log.Printf("%s.Close() = %v", c.name, err)
	return
}
| |
// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
// It only contains one field (and a pointer field at that), so it
// fits in an interface value without an extra allocation.
type checkConnErrorWriter struct {
	c *conn
}

// Write forwards to the connection's writer, recording only the first
// error so c.werr reflects the original failure.
func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
	n, err = w.c.w.Write(p)
	if err != nil && w.c.werr == nil {
		w.c.werr = err
	}
	return
}
| |
// numLeadingCRorLF reports how many consecutive bytes at the start of v
// are carriage returns or line feeds.
func numLeadingCRorLF(v []byte) int {
	for i := 0; i < len(v); i++ {
		if v[i] != '\r' && v[i] != '\n' {
			return i
		}
	}
	return len(v)
}
| |
// The algorithm uses at most sniffLen bytes to make its decision.
const sniffLen = 512

// DetectContentType implements the algorithm described
// at http://mimesniff.spec.whatwg.org/ to determine the
// Content-Type of the given data. It considers at most the
// first 512 bytes of data. DetectContentType always returns
// a valid MIME type: if it cannot determine a more specific one, it
// returns "application/octet-stream".
func DetectContentType(data []byte) string {
	if len(data) > sniffLen {
		data = data[:sniffLen]
	}

	// Index of the first non-whitespace byte; signatures that set
	// skipWS match from here instead of offset 0.
	firstNonWS := 0
	for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
	}

	// First matching signature wins, so table order matters.
	for _, sig := range sniffSignatures {
		if ct := sig.match(data, firstNonWS); ct != "" {
			return ct
		}
	}

	return "application/octet-stream"
}
| |
// isWS reports whether b is a whitespace byte for MIME sniffing
// purposes: tab, newline, form feed, carriage return, or space.
func isWS(b byte) bool {
	return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
}
| |
// sniffSig is a single content-type signature used by DetectContentType.
type sniffSig interface {
	// match returns the MIME type of the data, or "" if unknown.
	match(data []byte, firstNonWS int) string
}
| |
// Data matching the table in section 6.
// Entries are tried in order; the first match wins.
var sniffSignatures = []sniffSig{
	// HTML tags, matched case-insensitively after leading whitespace.
	htmlSig("<!DOCTYPE HTML"),
	htmlSig("<HTML"),
	htmlSig("<HEAD"),
	htmlSig("<SCRIPT"),
	htmlSig("<IFRAME"),
	htmlSig("<H1"),
	htmlSig("<DIV"),
	htmlSig("<FONT"),
	htmlSig("<TABLE"),
	htmlSig("<A"),
	htmlSig("<STYLE"),
	htmlSig("<TITLE"),
	htmlSig("<B"),
	htmlSig("<BODY"),
	htmlSig("<BR"),
	htmlSig("<P"),
	htmlSig("<!--"),

	// XML declaration, also after leading whitespace.
	&maskedSig{mask: []byte("\xFF\xFF\xFF\xFF\xFF"), pat: []byte("<?xml"), skipWS: true, ct: "text/xml; charset=utf-8"},

	&exactSig{[]byte("%PDF-"), "application/pdf"},
	&exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},

	// UTF byte-order marks.
	&maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFE\xFF\x00\x00"), ct: "text/plain; charset=utf-16be"},
	&maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFF\xFE\x00\x00"), ct: "text/plain; charset=utf-16le"},
	&maskedSig{mask: []byte("\xFF\xFF\xFF\x00"), pat: []byte("\xEF\xBB\xBF\x00"), ct: "text/plain; charset=utf-8"},

	// Image formats.
	&exactSig{[]byte("GIF87a"), "image/gif"},
	&exactSig{[]byte("GIF89a"), "image/gif"},
	&exactSig{[]byte("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), "image/png"},
	&exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
	&exactSig{[]byte("BM"), "image/bmp"},
	&maskedSig{
		mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
		pat:  []byte("RIFF\x00\x00\x00\x00WEBPVP"),
		ct:   "image/webp",
	},
	&exactSig{[]byte("\x00\x00\x01\x00"), "image/vnd.microsoft.icon"},

	// Audio/video containers.
	&exactSig{[]byte("\x4F\x67\x67\x53\x00"), "application/ogg"},
	&maskedSig{
		mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
		pat:  []byte("RIFF\x00\x00\x00\x00WAVE"),
		ct:   "audio/wave",
	},
	&exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},

	// Archive formats.
	&exactSig{[]byte("\x52\x61\x72\x20\x1A\x07\x00"), "application/x-rar-compressed"},
	&exactSig{[]byte("\x50\x4B\x03\x04"), "application/zip"},
	&exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},

	// Catch-all: plain text unless binary control bytes are present.
	textSig(0),
}
| |
// exactSig matches data that begins with the exact byte sequence sig.
type exactSig struct {
	sig []byte
	ct  string
}

// match returns e.ct when data starts with e.sig, "" otherwise.
// The firstNonWS offset is ignored: exact signatures anchor at byte 0.
func (e *exactSig) match(data []byte, firstNonWS int) string {
	if !bytes.HasPrefix(data, e.sig) {
		return ""
	}
	return e.ct
}
| |
// maskedSig matches data whose first len(mask) bytes, each AND-ed with
// the corresponding mask byte, equal pat. When skipWS is set, leading
// whitespace (per firstNonWS) is skipped before matching.
type maskedSig struct {
	mask, pat []byte
	skipWS    bool
	ct        string
}

// match applies the mask/pattern comparison and returns m.ct on success.
func (m *maskedSig) match(data []byte, firstNonWS int) string {
	if m.skipWS {
		data = data[firstNonWS:]
	}
	if len(data) < len(m.mask) {
		return ""
	}
	for i, maskByte := range m.mask {
		if data[i]&maskByte != m.pat[i] {
			return ""
		}
	}
	return m.ct
}
| |
// htmlSig matches a case-insensitive HTML tag prefix after leading
// whitespace. Uppercase letters in the signature match either case in
// the input; the tag must be followed by a space or '>'.
type htmlSig []byte

func (h htmlSig) match(data []byte, firstNonWS int) string {
	data = data[firstNonWS:]
	// Need the whole signature plus one terminating byte.
	if len(data) < len(h)+1 {
		return ""
	}
	for i, sigByte := range h {
		b := data[i]
		if 'A' <= sigByte && sigByte <= 'Z' {
			b &= 0xDF // fold ASCII lowercase to uppercase
		}
		if sigByte != b {
			return ""
		}
	}
	// The tag must end here; e.g. "<B" must not match "<BR".
	if next := data[len(h)]; next != ' ' && next != '>' {
		return ""
	}
	return "text/html; charset=utf-8"
}
| |
// mp4ftype is the box type ("ftyp") that must appear at offset 4 of an
// MP4 file.
var mp4ftype = []byte("ftyp")

// mp4Sig matches MP4 container data by inspecting the leading ftyp box.
// NOTE(review): this type is not listed in sniffSignatures in this
// chunk, so it appears unreferenced here — confirm against the rest of
// the file.
type mp4Sig int

func (mp4Sig) match(data []byte, firstNonWS int) string {
	// Need at least the 4-byte box size plus the 4-byte "ftyp" tag.
	if len(data) < 8 {
		return ""
	}
	boxSize := int(binary.BigEndian.Uint32(data[:4]))
	if boxSize%4 != 0 || len(data) < boxSize {
		return ""
	}
	if !bytes.Equal(data[4:8], mp4ftype) {
		return ""
	}
	// Walk the 4-byte entries after "ftyp" (major brand at 8, minor
	// version at 12, compatible brands after), comparing only the
	// first 3 bytes of each.
	for offset := 8; offset < boxSize; offset += 4 {
		if offset == 12 {
			// Minor version number, not a brand.
			continue
		}
		switch string(data[offset : offset+3]) {
		case "mp4", "iso", "M4V", "M4P", "M4B":
			return "video/mp4"
		}
	}
	return ""
}
| |
// textSig is the fallback plain-text signature: it matches unless the
// data past any leading whitespace contains a binary-looking control
// byte.
type textSig int

func (textSig) match(data []byte, firstNonWS int) string {
	for _, b := range data[firstNonWS:] {
		binaryByte := b <= 0x08 ||
			b == 0x0B ||
			(0x0E <= b && b <= 0x1A) ||
			(0x1C <= b && b <= 0x1F)
		if binaryByte {
			return ""
		}
	}
	return "text/plain; charset=utf-8"
}
| |
// HTTP status codes, defined in RFC 2616.
const (
	// 1xx Informational
	StatusContinue           = 100
	StatusSwitchingProtocols = 101

	// 2xx Success
	StatusOK                   = 200
	StatusCreated              = 201
	StatusAccepted             = 202
	StatusNonAuthoritativeInfo = 203
	StatusNoContent            = 204
	StatusResetContent         = 205
	StatusPartialContent       = 206

	// 3xx Redirection
	StatusMultipleChoices   = 300
	StatusMovedPermanently  = 301
	StatusFound             = 302
	StatusSeeOther          = 303
	StatusNotModified       = 304
	StatusUseProxy          = 305
	StatusTemporaryRedirect = 307

	// 4xx Client Error
	StatusBadRequest                   = 400
	StatusUnauthorized                 = 401
	StatusPaymentRequired              = 402
	StatusForbidden                    = 403
	StatusNotFound                     = 404
	StatusMethodNotAllowed             = 405
	StatusNotAcceptable                = 406
	StatusProxyAuthRequired            = 407
	StatusRequestTimeout               = 408
	StatusConflict                     = 409
	StatusGone                         = 410
	StatusLengthRequired               = 411
	StatusPreconditionFailed           = 412
	StatusRequestEntityTooLarge        = 413
	StatusRequestURITooLong            = 414
	StatusUnsupportedMediaType         = 415
	StatusRequestedRangeNotSatisfiable = 416
	StatusExpectationFailed            = 417
	StatusTeapot                       = 418

	// 5xx Server Error
	StatusInternalServerError     = 500
	StatusNotImplemented          = 501
	StatusBadGateway              = 502
	StatusServiceUnavailable      = 503
	StatusGatewayTimeout          = 504
	StatusHTTPVersionNotSupported = 505

	// New HTTP status codes from RFC 6585. Not exported yet in Go 1.1.
	// See discussion at https://codereview.appspot.com/7678043/
	statusPreconditionRequired          = 428
	statusTooManyRequests               = 429
	statusRequestHeaderFieldsTooLarge   = 431
	statusNetworkAuthenticationRequired = 511
)
| |
// statusText maps HTTP status codes to their canonical reason phrases,
// as returned by StatusText.
var statusText = map[int]string{
	StatusContinue:           "Continue",
	StatusSwitchingProtocols: "Switching Protocols",

	StatusOK:                   "OK",
	StatusCreated:              "Created",
	StatusAccepted:             "Accepted",
	StatusNonAuthoritativeInfo: "Non-Authoritative Information",
	StatusNoContent:            "No Content",
	StatusResetContent:         "Reset Content",
	StatusPartialContent:       "Partial Content",

	StatusMultipleChoices:   "Multiple Choices",
	StatusMovedPermanently:  "Moved Permanently",
	StatusFound:             "Found",
	StatusSeeOther:          "See Other",
	StatusNotModified:       "Not Modified",
	StatusUseProxy:          "Use Proxy",
	StatusTemporaryRedirect: "Temporary Redirect",

	StatusBadRequest:                   "Bad Request",
	StatusUnauthorized:                 "Unauthorized",
	StatusPaymentRequired:              "Payment Required",
	StatusForbidden:                    "Forbidden",
	StatusNotFound:                     "Not Found",
	StatusMethodNotAllowed:             "Method Not Allowed",
	StatusNotAcceptable:                "Not Acceptable",
	StatusProxyAuthRequired:            "Proxy Authentication Required",
	StatusRequestTimeout:               "Request Timeout",
	StatusConflict:                     "Conflict",
	StatusGone:                         "Gone",
	StatusLengthRequired:               "Length Required",
	StatusPreconditionFailed:           "Precondition Failed",
	StatusRequestEntityTooLarge:        "Request Entity Too Large",
	StatusRequestURITooLong:            "Request URI Too Long",
	StatusUnsupportedMediaType:         "Unsupported Media Type",
	StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
	StatusExpectationFailed:            "Expectation Failed",
	StatusTeapot:                       "I'm a teapot",

	StatusInternalServerError:     "Internal Server Error",
	StatusNotImplemented:          "Not Implemented",
	StatusBadGateway:              "Bad Gateway",
	StatusServiceUnavailable:      "Service Unavailable",
	StatusGatewayTimeout:          "Gateway Timeout",
	StatusHTTPVersionNotSupported: "HTTP Version Not Supported",

	statusPreconditionRequired:          "Precondition Required",
	statusTooManyRequests:               "Too Many Requests",
	statusRequestHeaderFieldsTooLarge:   "Request Header Fields Too Large",
	statusNetworkAuthenticationRequired: "Network Authentication Required",
}
| |
| // StatusText returns a text for the HTTP status code. It returns the empty |
| // string if the code is unknown. |
| func StatusText(code int) string { |
| return statusText[code] |
| } |
| |
// ErrLineTooLong is returned when reading request or response bodies
// with malformed chunked encoding.
var ErrLineTooLong = internal.ErrLineTooLong

// errorReader is an io.Reader that always fails with the wrapped error.
type errorReader struct {
	err error
}

// Read reports zero bytes read and the stored error.
func (r errorReader) Read(p []byte) (n int, err error) {
	return 0, r.err
}
| |
// transferWriter inspects the fields of a user-supplied Request or Response,
// sanitizes them without changing the user object and provides methods for
// writing the respective header, body and trailer in wire format.
type transferWriter struct {
	Method           string    // request method (for a Response, taken from its Request)
	Body             io.Reader // body to write, or nil for none
	BodyCloser       io.Closer // closed by WriteBody after the body is written
	ResponseToHEAD   bool      // response to a HEAD request: headers only, no body
	ContentLength    int64     // -1 means unknown, 0 means exactly none
	Close            bool      // write "Connection: close"
	TransferEncoding []string
	Trailer          Header
	IsResponse       bool
}
| |
// newTransferWriter builds a transferWriter from a *Request or
// *Response, deciding how the body will be framed on the wire
// (Content-Length vs. chunked) without mutating the caller's object.
func newTransferWriter(r interface{}) (t *transferWriter, err error) {
	t = &transferWriter{}

	// Extract the relevant fields from the concrete message type.
	atLeastHTTP11 := false
	switch rr := r.(type) {
	case *Request:
		if rr.ContentLength != 0 && rr.Body == nil {
			return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength)
		}
		t.Method = rr.Method
		t.Body = rr.Body
		t.BodyCloser = rr.Body
		t.ContentLength = rr.ContentLength
		t.Close = rr.Close
		t.TransferEncoding = rr.TransferEncoding
		t.Trailer = rr.Trailer
		atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
		if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 {
			if t.ContentLength == 0 {
				// Test to see if it's actually zero or just unset.
				var buf [1]byte
				n, rerr := io.ReadFull(t.Body, buf[:])
				if rerr != nil && rerr != io.EOF {
					// Probe read failed: surface the error on the first
					// body read and treat the length as unknown.
					t.ContentLength = -1
					t.Body = errorReader{rerr}
				} else if n == 1 {
					// There is data after all; ContentLength just wasn't
					// set. Stitch the probed byte back in front of Body.
					t.ContentLength = -1
					t.Body = io.MultiReader(bytes.NewReader(buf[:]), t.Body)
				} else {
					// Body is actually empty.
					t.Body = nil
					t.BodyCloser = nil
				}
			}
			if t.ContentLength < 0 {
				// Unknown length on HTTP/1.1: use chunked framing.
				t.TransferEncoding = []string{"chunked"}
			}
		}
	case *Response:
		t.IsResponse = true
		if rr.Request != nil {
			t.Method = rr.Request.Method
		}
		t.Body = rr.Body
		t.BodyCloser = rr.Body
		t.ContentLength = rr.ContentLength
		t.Close = rr.Close
		t.TransferEncoding = rr.TransferEncoding
		t.Trailer = rr.Trailer
		atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
		t.ResponseToHEAD = noBodyExpected(t.Method)
	}

	// Sanitize Body, ContentLength, TransferEncoding.
	if t.ResponseToHEAD {
		// A response to HEAD describes the body but never carries it.
		t.Body = nil
		if chunked(t.TransferEncoding) {
			t.ContentLength = -1
		}
	} else {
		if !atLeastHTTP11 || t.Body == nil {
			// Chunked is an HTTP/1.1 feature, and pointless with no body.
			t.TransferEncoding = nil
		}
		if chunked(t.TransferEncoding) {
			t.ContentLength = -1
		} else if t.Body == nil { // no chunking, no body
			t.ContentLength = 0
		}
	}

	// Trailers are only valid with chunked framing.
	if !chunked(t.TransferEncoding) {
		t.Trailer = nil
	}

	return t, nil
}
| |
// noBodyExpected reports whether a response to the given request method
// must not carry a body (only "HEAD" here).
func noBodyExpected(requestMethod string) bool {
	switch requestMethod {
	case "HEAD":
		return true
	default:
		return false
	}
}
| |
| func (t *transferWriter) shouldSendContentLength() bool { |
| if chunked(t.TransferEncoding) { |
| return false |
| } |
| if t.ContentLength > 0 { |
| return true |
| } |
| if t.ContentLength < 0 { |
| return false |
| } |
| |
| if t.Method == "POST" || t.Method == "PUT" { |
| return true |
| } |
| if t.ContentLength == 0 && isIdentity(t.TransferEncoding) { |
| if t.Method == "GET" || t.Method == "HEAD" { |
| return false |
| } |
| return true |
| } |
| |
| return false |
| } |
| |
// WriteHeader writes the framing-related headers (Connection,
// Content-Length or Transfer-Encoding, and Trailer) to w. It returns
// an error for an invalid Trailer key or any write failure.
func (t *transferWriter) WriteHeader(w io.Writer) error {
	if t.Close {
		if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil {
			return err
		}
	}

	// Content-Length and chunked Transfer-Encoding are mutually
	// exclusive; shouldSendContentLength decides which applies.
	if t.shouldSendContentLength() {
		if _, err := io.WriteString(w, "Content-Length: "); err != nil {
			return err
		}
		if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil {
			return err
		}
	} else if chunked(t.TransferEncoding) {
		if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil {
			return err
		}
	}

	// Announce the trailer field names (canonicalized, sorted for
	// deterministic output) in a Trailer header.
	if t.Trailer != nil {
		keys := make([]string, 0, len(t.Trailer))
		for k := range t.Trailer {
			k = CanonicalHeaderKey(k)
			switch k {
			case "Transfer-Encoding", "Trailer", "Content-Length":
				// Framing headers may not appear as trailers.
				return &badStringError{"invalid Trailer key", k}
			}
			keys = append(keys, k)
		}
		if len(keys) > 0 {
			sort.Strings(keys)

			if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil {
				return err
			}
		}
	}

	return nil
}
| |
// WriteBody writes the body to w using the framing chosen earlier
// (chunked, fixed length, or until-EOF), closes the body, and — for
// chunked bodies — writes the trailer and terminating blank line. It
// returns an error if the body length does not match a declared
// ContentLength.
func (t *transferWriter) WriteBody(w io.Writer) error {
	var err error
	var ncopy int64

	// Write the body itself.
	if t.Body != nil {
		if chunked(t.TransferEncoding) {
			if bw, ok := w.(*bufio.Writer); ok && !t.IsResponse {
				// Flush after each chunk so request chunks reach the
				// peer as they are produced.
				w = &internal.FlushAfterChunkWriter{bw}
			}
			cw := internal.NewChunkedWriter(w)
			_, err = io.Copy(cw, t.Body)
			if err == nil {
				// Close emits the terminating zero-length chunk.
				err = cw.Close()
			}
		} else if t.ContentLength == -1 {
			// Unknown length: stream until EOF.
			ncopy, err = io.Copy(w, t.Body)
		} else {
			// Fixed length: write exactly ContentLength bytes, then
			// drain any excess so the mismatch check below can fire.
			ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength))
			if err != nil {
				return err
			}
			var nextra int64
			nextra, err = io.Copy(io.Discard, t.Body)
			ncopy += nextra
		}
		if err != nil {
			return err
		}
		if err = t.BodyCloser.Close(); err != nil {
			return err
		}
	}

	if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy {
		return fmt.Errorf("http: ContentLength=%d with Body length %d",
			t.ContentLength, ncopy)
	}

	if chunked(t.TransferEncoding) {
		// Write the trailer headers, if any, after the last chunk.
		if t.Trailer != nil {
			if err := t.Trailer.Write(w); err != nil {
				return err
			}
		}
		// Blank line terminating the chunked body/trailer section.
		_, err = io.WriteString(w, "\r\n")
	}
	return err
}
| |
// transferReader collects the inputs needed to decode a message's
// transfer framing and the decoded outputs that readTransfer copies
// back onto the Request or Response.
type transferReader struct {
	// Input
	Header        Header
	StatusCode    int
	RequestMethod string
	ProtoMajor    int
	ProtoMinor    int
	// Output
	Body             io.ReadCloser
	ContentLength    int64
	TransferEncoding []string
	Close            bool
	Trailer          Header
}
| |
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC2616, section 4.4.
func bodyAllowedForStatus(status int) bool {
	if status >= 100 && status <= 199 {
		return false // 1xx informational responses have no body
	}
	if status == 204 || status == 304 {
		return false // No Content / Not Modified
	}
	return true
}

var (
	suppressedHeaders304    = []string{"Content-Type", "Content-Length", "Transfer-Encoding"}
	suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"}
)

// suppressedHeaders returns the headers that must be omitted for the
// given response status, or nil when none are suppressed.
func suppressedHeaders(status int) []string {
	if status == 304 {
		// RFC 2616 section 10.3.5: "the response MUST NOT include other entity-headers"
		return suppressedHeaders304
	}
	if !bodyAllowedForStatus(status) {
		return suppressedHeadersNoBody
	}
	return nil
}
| |
// readTransfer decodes the transfer framing (Transfer-Encoding,
// Content-Length, Trailer, Connection) from the headers of msg and
// populates its Body, ContentLength, TransferEncoding, Close and
// Trailer fields. msg is *Request or *Response.
func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
	t := &transferReader{RequestMethod: "GET"}

	// Unify input: pull the relevant fields out of the concrete type.
	isResponse := false
	switch rr := msg.(type) {
	case *Response:
		t.Header = rr.Header
		t.StatusCode = rr.StatusCode
		t.ProtoMajor = rr.ProtoMajor
		t.ProtoMinor = rr.ProtoMinor
		t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true)
		isResponse = true
		if rr.Request != nil {
			t.RequestMethod = rr.Request.Method
		}
	case *Request:
		t.Header = rr.Header
		t.RequestMethod = rr.Method
		t.ProtoMajor = rr.ProtoMajor
		t.ProtoMinor = rr.ProtoMinor
		// Transfer semantics for Requests are treated like a
		// 200-status Response below.
		t.StatusCode = 200
		t.Close = rr.Close
	default:
		panic("unexpected type")
	}

	// Default to HTTP/1.1 when the version is unset.
	if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
		t.ProtoMajor, t.ProtoMinor = 1, 1
	}

	// Transfer encoding, content length.
	t.TransferEncoding, err = fixTransferEncoding(isResponse, t.RequestMethod, t.Header)
	if err != nil {
		return err
	}

	realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
	if err != nil {
		return err
	}
	if isResponse && t.RequestMethod == "HEAD" {
		// For HEAD, record the declared Content-Length even though no
		// body bytes follow on the wire.
		if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
			return err
		} else {
			t.ContentLength = n
		}
	} else {
		t.ContentLength = realLength
	}

	// Trailer announcement.
	t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
	if err != nil {
		return err
	}

	switch msg.(type) {
	case *Response:
		if realLength == -1 &&
			!chunked(t.TransferEncoding) &&
			bodyAllowedForStatus(t.StatusCode) {
			// Unbounded body: only the connection closing marks EOF.
			t.Close = true
		}
	}

	// Prepare the body reader according to the framing found above.
	switch {
	case chunked(t.TransferEncoding):
		if noBodyExpected(t.RequestMethod) {
			t.Body = eofReader
		} else {
			t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
		}
	case realLength == 0:
		t.Body = eofReader
	case realLength > 0:
		t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close}
	default:
		// realLength < 0: no Content-Length and not chunked.
		if t.Close {
			// Read until the connection closes.
			t.Body = &body{src: r, closing: t.Close}
		} else {
			// Persistent connection with no declared body: empty.
			t.Body = eofReader
		}
	}

	// Unify output: copy the decoded fields back onto msg.
	switch rr := msg.(type) {
	case *Request:
		rr.Body = t.Body
		rr.ContentLength = t.ContentLength
		rr.TransferEncoding = t.TransferEncoding
		rr.Close = t.Close
		rr.Trailer = t.Trailer
	case *Response:
		rr.Body = t.Body
		rr.ContentLength = t.ContentLength
		rr.TransferEncoding = t.TransferEncoding
		rr.Close = t.Close
		rr.Trailer = t.Trailer
	}

	return nil
}
| |
// chunked reports whether "chunked" is the first (outermost) element of
// the transfer-encoding stack.
func chunked(te []string) bool {
	if len(te) == 0 {
		return false
	}
	return te[0] == "chunked"
}

// isIdentity reports whether the encoding stack is exactly the single
// entry "identity".
func isIdentity(te []string) bool {
	return len(te) == 1 && te[0] == "identity"
}
| |
| // Sanitize transfer encoding |
| func fixTransferEncoding(isResponse bool, requestMethod string, header Header) ([]string, error) { |
| raw, present := header["Transfer-Encoding"] |
| if !present { |
| return nil, nil |
| } |
| delete(header, "Transfer-Encoding") |
| |
| encodings := strings.Split(raw[0], ",") |
| te := make([]string, 0, len(encodings)) |
| |
| for _, encoding := range encodings { |
| encoding = strings.ToLower(strings.TrimSpace(encoding)) |
| |
| if encoding == "identity" { |
| break |
| } |
| if encoding != "chunked" { |
| return nil, &badStringError{"unsupported transfer encoding", encoding} |
| } |
| te = te[0 : len(te)+1] |
| te[len(te)-1] = encoding |
| } |
| if len(te) > 1 { |
| return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")} |
| } |
| if len(te) > 0 { |
| |
| delete(header, "Content-Length") |
| return te, nil |
| } |
| |
| return nil, nil |
| } |
| |
// fixLength determines the expected body length, using RFC 2616 Section 4.4.
// It returns -1 when the length is unknown (chunked, or read until the
// connection closes) and may delete a blank Content-Length header from
// header. This function is not a method, because ultimately it should
// be shared by ReadResponse and ReadRequest.
func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
	contentLens := header["Content-Length"]
	isRequest := !isResponse

	if noBodyExpected(requestMethod) {
		// Reject a Content-Length on bodyless request methods unless
		// it is a single "0" value.
		if isRequest && len(contentLens) > 0 && !(len(contentLens) == 1 && contentLens[0] == "0") {
			return 0, fmt.Errorf("http: method cannot contain a Content-Length; got %q", contentLens)
		}
		return 0, nil
	}
	if status/100 == 1 {
		// 1xx informational responses carry no body.
		return 0, nil
	}
	switch status {
	case 204, 304:
		// No Content / Not Modified: never a body.
		return 0, nil
	}

	if len(contentLens) > 1 {
		// Ambiguous framing; refuse rather than guess.
		return 0, errors.New("http: message cannot contain multiple Content-Length headers")
	}

	// Chunked framing takes precedence over any Content-Length.
	if chunked(te) {
		return -1, nil
	}

	// Logic based on Content-Length
	var cl string
	if len(contentLens) == 1 {
		cl = strings.TrimSpace(contentLens[0])
	}
	if cl != "" {
		n, err := parseContentLength(cl)
		if err != nil {
			return -1, err
		}
		return n, nil
	} else {
		// Blank or absent value: drop the header entirely.
		header.Del("Content-Length")
	}

	if !isResponse {
		// A request with no declared length defaults to no body,
		// rather than read-until-close.
		return 0, nil
	}

	// Response with no length information: read until the connection closes.
	return -1, nil
}
| |
| // Determine whether to hang up after sending a request and body, or |
| // receiving a response and body |
| // 'header' is the request headers |
| func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool { |
| if major < 1 { |
| return true |
| } else if major == 1 && minor == 0 { |
| vv := header["Connection"] |
| if headerValuesContainsToken(vv, "close") || !headerValuesContainsToken(vv, "keep-alive") { |
| return true |
| } |
| return false |
| } else { |
| if headerValuesContainsToken(header["Connection"], "close") { |
| if removeCloseHeader { |
| header.Del("Connection") |
| } |
| return true |
| } |
| } |
| return false |
| } |
| |
// fixTrailer parses the Trailer header into a Header whose keys are the
// announced trailer field names with nil values (the values arrive with
// the trailer itself). It rejects framing headers as trailer keys and
// requires chunked encoding for any trailer to be declared.
func fixTrailer(header Header, te []string) (Header, error) {
	raw := header.get("Trailer")
	if raw == "" {
		return nil, nil
	}

	header.Del("Trailer")
	trailer := make(Header)
	keys := strings.Split(raw, ",")
	for _, key := range keys {
		key = CanonicalHeaderKey(strings.TrimSpace(key))
		switch key {
		case "Transfer-Encoding", "Trailer", "Content-Length":
			// Framing headers may not be repeated in the trailer.
			return nil, &badStringError{"bad trailer key", key}
		}
		// nil value: filled in when the trailer is actually read.
		trailer[key] = nil
	}
	if len(trailer) == 0 {
		return nil, nil
	}
	if !chunked(te) {
		// Only chunked bodies can carry trailers.
		return nil, ErrUnexpectedTrailer
	}
	return trailer, nil
}
| |
// body turns a Reader into a ReadCloser.
// Close ensures that the body has been fully read
// and then reads the trailer if necessary.
type body struct {
	src          io.Reader
	hdr          interface{}   // non-nil (Response or Request) value means read trailer
	r            *bufio.Reader // underlying wire-format reader for the trailer
	closing      bool          // is the connection to be closed after reading body?
	doEarlyClose bool          // whether Close should stop early

	mu         sync.Mutex // guards closed, and calls to Read and Close
	sawEOF     bool       // whether src has returned io.EOF
	closed     bool       // whether Close has been called
	earlyClose bool       // Close called and we didn't read to the end of src
}

// ErrBodyReadAfterClose is returned when reading a Request or Response
// Body after the body has been closed. This typically happens when the body is
// read after an HTTP Handler calls WriteHeader or Write on its
// ResponseWriter.
var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body")
| |
| func (b *body) Read(p []byte) (n int, err error) { |
| b.mu.Lock() |
| defer b.mu.Unlock() |
| if b.closed { |
| return 0, ErrBodyReadAfterClose |
| } |
| return b.readLocked(p) |
| } |
| |
// readLocked reads from b.src, tracking EOF so the trailer (if any) is
// read exactly once and subsequent reads return io.EOF.
// Must hold b.mu.
func (b *body) readLocked(p []byte) (n int, err error) {
	if b.sawEOF {
		return 0, io.EOF
	}
	n, err = b.src.Read(p)

	if err == io.EOF {
		b.sawEOF = true
		// A non-nil hdr means a trailer follows the body; read it now.
		if b.hdr != nil {
			if e := b.readTrailer(); e != nil {
				err = e
				// A bad trailer poisons the body: un-mark EOF and mark
				// the body closed so no further reads succeed.
				b.sawEOF = false
				b.closed = true
			}
			b.hdr = nil
		} else {
			// With a declared Content-Length, the body is a
			// LimitedReader; an EOF before N reached 0 arrived early.
			if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 {
				err = io.ErrUnexpectedEOF
			}
		}
	}

	// When a LimitedReader is exactly exhausted by this read, report
	// EOF together with the data (optional per the io.Reader contract).
	if err == nil && n > 0 {
		if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 {
			err = io.EOF
			b.sawEOF = true
		}
	}

	return n, err
}
| |
| var ( |
| singleCRLF = []byte("\r\n") |
| doubleCRLF = []byte("\r\n\r\n") |
| ) |
| |
| func seeUpcomingDoubleCRLF(r *bufio.Reader) bool { |
| for peekSize := 4; ; peekSize++ { |
| |
| buf, err := r.Peek(peekSize) |
| if bytes.HasSuffix(buf, doubleCRLF) { |
| return true |
| } |
| if err != nil { |
| break |
| } |
| } |
| return false |
| } |
| |
var errTrailerEOF = errors.New("http: unexpected EOF reading trailer")

// readTrailer consumes the trailer section that follows a chunked body:
// either a bare CRLF (empty trailer, the common case) or MIME-style
// header lines ending in a blank line. Parsed fields are merged into
// the Request/Response Trailer map held in b.hdr.
func (b *body) readTrailer() error {
	// Fast path: a lone CRLF means there is no trailer.
	buf, err := b.r.Peek(2)
	if bytes.Equal(buf, singleCRLF) {
		b.r.Discard(2)
		return nil
	}
	if len(buf) < 2 {
		return errTrailerEOF
	}
	if err != nil {
		return err
	}

	// Require the header terminator to already be in sight, bounding
	// how much trailer data we are willing to buffer.
	if !seeUpcomingDoubleCRLF(b.r) {
		return errors.New("http: suspiciously long trailer after chunked body")
	}

	hdr, err := textproto.NewReader(b.r).ReadMIMEHeader()
	if err != nil {
		if err == io.EOF {
			return errTrailerEOF
		}
		return err
	}
	switch rr := b.hdr.(type) {
	case *Request:
		mergeSetHeader(&rr.Trailer, Header(hdr))
	case *Response:
		mergeSetHeader(&rr.Trailer, Header(hdr))
	}
	return nil
}
| |
| func mergeSetHeader(dst *Header, src Header) { |
| if *dst == nil { |
| *dst = src |
| return |
| } |
| for k, vv := range src { |
| (*dst)[k] = vv |
| } |
| } |
| |
| // unreadDataSizeLocked returns the number of bytes of unread input. |
| // It returns -1 if unknown. |
| // b.mu must be held. |
| func (b *body) unreadDataSizeLocked() int64 { |
| if lr, ok := b.src.(*io.LimitedReader); ok { |
| return lr.N |
| } |
| return -1 |
| } |
| |
// Close marks the body closed and, depending on state, drains unread
// data so a keep-alive connection can be reused. It is a no-op after
// the first call.
func (b *body) Close() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return nil
	}
	var err error
	switch {
	case b.sawEOF:
		// Already at EOF (and past any trailer); nothing to drain.
	case b.hdr == nil && b.closing:
		// No trailer expected and the connection is closing anyway,
		// so there is no point consuming the rest of the body.
	case b.doEarlyClose:
		// Read at most maxPostHandlerReadBytes looking for EOF (and a
		// trailer); beyond that, give up on reusing the connection.
		if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
			// Declared Content-Length with more remaining than we are
			// willing to drain: record the early close immediately.
			b.earlyClose = true
		} else {
			var n int64
			// bodyLocked avoids re-acquiring the mutex we already hold.
			n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
			if err == io.EOF {
				err = nil
			}
			if n == maxPostHandlerReadBytes {
				// Hit the cap without EOF: treat as an early close.
				b.earlyClose = true
			}
		}
	default:
		// Fully consume the body, which also reads any trailer.
		_, err = io.Copy(io.Discard, bodyLocked{b})
	}
	b.closed = true
	return err
}
| |
// didEarlyClose reports whether Close stopped before reaching the end
// of the body.
func (b *body) didEarlyClose() bool {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.earlyClose
}

// bodyLocked is a io.Reader reading from a *body when its mutex is
// already held.
type bodyLocked struct {
	b *body
}

// Read delegates to readLocked; the caller must already hold b.mu.
func (bl bodyLocked) Read(p []byte) (n int, err error) {
	if bl.b.closed {
		return 0, ErrBodyReadAfterClose
	}
	return bl.b.readLocked(p)
}
| |
| // parseContentLength trims whitespace from s and returns -1 if no value |
| // is set, or the value if it's >= 0. |
| func parseContentLength(cl string) (int64, error) { |
| cl = strings.TrimSpace(cl) |
| if cl == "" { |
| return -1, nil |
| } |
| n, err := strconv.ParseInt(cl, 10, 64) |
| if err != nil || n < 0 { |
| return 0, &badStringError{"bad Content-Length", cl} |
| } |
| return n, nil |
| |
| } |
| |
// DefaultTransport is the default implementation of Transport and is
// used by DefaultClient. It establishes network connections as needed
// and caches them for reuse by subsequent calls. It uses HTTP proxies
// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and
// $no_proxy) environment variables.
var DefaultTransport RoundTripper = &Transport{
	Proxy: ProxyFromEnvironment,
	Dial: (&net.Dialer{
		Timeout:   30 * time.Second, // cap on connection establishment
		KeepAlive: 30 * time.Second, // TCP keep-alive period
	}).Dial,
	TLSHandshakeTimeout: 10 * time.Second,
}

// DefaultMaxIdleConnsPerHost is the default value of Transport's
// MaxIdleConnsPerHost.
const DefaultMaxIdleConnsPerHost = 2
| |
// Transport is an implementation of RoundTripper that supports HTTP,
// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
// Transport can also cache connections for future re-use.
type Transport struct {
	// idleMu presumably guards the idle-connection state below —
	// confirm against the methods elsewhere in this file.
	idleMu     sync.Mutex
	wantIdle   bool // user has requested to close all idle conns
	idleConn   map[connectMethodKey][]*persistConn
	idleConnCh map[connectMethodKey]chan *persistConn

	// reqMu presumably guards reqCanceler — confirm likewise.
	reqMu       sync.Mutex
	reqCanceler map[*Request]func()

	altMu    sync.RWMutex
	altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper

	// Proxy specifies a function to return a proxy for a given
	// Request. If the function returns a non-nil error, the
	// request is aborted with the provided error.
	// If Proxy is nil or returns a nil *URL, no proxy is used.
	Proxy func(*Request) (*url.URL, error)

	// Dial specifies the dial function for creating unencrypted
	// TCP connections.
	// If Dial is nil, net.Dial is used.
	Dial func(network, addr string) (net.Conn, error)

	// DialTLS specifies an optional dial function for creating
	// TLS connections for non-proxied HTTPS requests.
	//
	// If DialTLS is nil, Dial and TLSClientConfig are used.
	//
	// If DialTLS is set, the Dial hook is not used for HTTPS
	// requests and the TLSClientConfig and TLSHandshakeTimeout
	// are ignored. The returned net.Conn is assumed to already be
	// past the TLS handshake.
	DialTLS func(network, addr string) (net.Conn, error)

	// TLSClientConfig specifies the TLS configuration to use with
	// tls.Client. If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// TLSHandshakeTimeout specifies the maximum amount of time to
	// wait for a TLS handshake. Zero means no timeout.
	TLSHandshakeTimeout time.Duration

	// DisableKeepAlives, if true, prevents re-use of TCP connections
	// between different HTTP requests.
	DisableKeepAlives bool

	// DisableCompression, if true, prevents the Transport from
	// requesting compression with an "Accept-Encoding: gzip"
	// request header when the Request contains no existing
	// Accept-Encoding value. If the Transport requests gzip on
	// its own and gets a gzipped response, it's transparently
	// decoded in the Response.Body. However, if the user
	// explicitly requested gzip it is not automatically
	// uncompressed.
	DisableCompression bool

	// MaxIdleConnsPerHost, if non-zero, controls the maximum idle
	// (keep-alive) connections to keep per-host. If zero,
	// DefaultMaxIdleConnsPerHost is used.
	MaxIdleConnsPerHost int

	// ResponseHeaderTimeout, if non-zero, specifies the amount of
	// time to wait for a server's response headers after fully
	// writing the request (including its body, if any). This
	// time does not include the time to read the response body.
	ResponseHeaderTimeout time.Duration
}
| |
// ProxyFromEnvironment returns the URL of the proxy to use for a
// given request, as indicated by the environment variables
// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions
// thereof). HTTPS_PROXY takes precedence over HTTP_PROXY for https
// requests.
//
// The environment values may be either a complete URL or a
// "host[:port]", in which case the "http" scheme is assumed.
// An error is returned if the value is a different form.
//
// A nil URL and nil error are returned if no proxy is defined in the
// environment, or a proxy should not be used for the given request,
// as defined by NO_PROXY.
//
// As a special case, if req.URL.Host is "localhost" (with or without
// a port number), then a nil URL and nil error will be returned.
func ProxyFromEnvironment(req *Request) (*url.URL, error) {
	var proxy string
	if req.URL.Scheme == "https" {
		proxy = httpsProxyEnv.Get()
	}
	if proxy == "" {
		// Fall back to the plain HTTP proxy variable.
		proxy = httpProxyEnv.Get()
	}
	if proxy == "" {
		return nil, nil
	}
	if !useProxy(canonicalAddr(req.URL)) {
		// NO_PROXY (or equivalent policy) excludes this address.
		return nil, nil
	}
	proxyURL, err := url.Parse(proxy)
	if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
		// The value was bogus or schemeless; retry with an assumed
		// "http" scheme.
		if proxyURL, err := url.Parse("http://" + proxy); err == nil {
			return proxyURL, nil
		}
		// NOTE(review): if the original parse succeeded with a
		// non-http scheme and the retry fails, control falls through
		// and returns the original URL without an error — confirm
		// this is intended.
	}
	if err != nil {
		return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
	}
	return proxyURL, nil
}
| |
// ProxyURL returns a proxy function (for use in a Transport)
// that always returns the same URL.
func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
	// The returned closure ignores the request entirely.
	return func(*Request) (*url.URL, error) {
		return fixedURL, nil
	}
}
| |
// transportRequest is a wrapper around a *Request that adds
// optional extra headers to write.
type transportRequest struct {
	*Request        // original request, not to be mutated
	extra    Header // extra headers to write, or nil
}

// extraHeaders lazily allocates and returns the extra-header map.
func (tr *transportRequest) extraHeaders() Header {
	if tr.extra == nil {
		tr.extra = make(Header)
	}
	return tr.extra
}
| |
// RoundTrip implements the RoundTripper interface.
//
// For higher-level HTTP client support (such as handling of cookies
// and redirects), see Get, Post, and the Client type.
func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) {
	// Validate the request up front. On every early-error path the
	// request body must still be closed to release the caller's resources.
	if req.URL == nil {
		req.closeBody()
		return nil, errors.New("http: nil Request.URL")
	}
	if req.Header == nil {
		req.closeBody()
		return nil, errors.New("http: nil Request.Header")
	}
	if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
		// Non-HTTP scheme: hand off to a RoundTripper registered
		// via RegisterProtocol, if any.
		t.altMu.RLock()
		var rt RoundTripper
		if t.altProto != nil {
			rt = t.altProto[req.URL.Scheme]
		}
		t.altMu.RUnlock()
		if rt == nil {
			req.closeBody()
			return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme}
		}
		return rt.RoundTrip(req)
	}
	if req.URL.Host == "" {
		req.closeBody()
		return nil, errors.New("http: no Host in request URL")
	}
	treq := &transportRequest{Request: req}
	cm, err := t.connectMethodForRequest(treq)
	if err != nil {
		req.closeBody()
		return nil, err
	}

	// Get a cached or newly-dialed connection for the connect method
	// (direct host, proxy, or proxy pre-CONNECTed to an https server).
	pconn, err := t.getConn(req, cm)
	if err != nil {
		t.setReqCanceler(req, nil)
		req.closeBody()
		return nil, err
	}

	return pconn.roundTrip(treq)
}
| |
| // RegisterProtocol registers a new protocol with scheme. |
| // The Transport will pass requests using the given scheme to rt. |
| // It is rt's responsibility to simulate HTTP request semantics. |
| // |
| // RegisterProtocol can be used by other packages to provide |
| // implementations of protocol schemes like "ftp" or "file". |
| func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) { |
| if scheme == "http" || scheme == "https" { |
| panic("protocol " + scheme + " already registered") |
| } |
| t.altMu.Lock() |
| defer t.altMu.Unlock() |
| if t.altProto == nil { |
| t.altProto = make(map[string]RoundTripper) |
| } |
| if _, exists := t.altProto[scheme]; exists { |
| panic("protocol " + scheme + " already registered") |
| } |
| t.altProto[scheme] = rt |
| } |
| |
| // CloseIdleConnections closes any connections which were previously |
| // connected from previous requests but are now sitting idle in |
| // a "keep-alive" state. It does not interrupt any connections currently |
| // in use. |
| func (t *Transport) CloseIdleConnections() { |
| t.idleMu.Lock() |
| m := t.idleConn |
| t.idleConn = nil |
| t.idleConnCh = nil |
| t.wantIdle = true |
| t.idleMu.Unlock() |
| for _, conns := range m { |
| for _, pconn := range conns { |
| pconn.close() |
| } |
| } |
| } |
| |
| // CancelRequest cancels an in-flight request by closing its connection. |
| // CancelRequest should only be called after RoundTrip has returned. |
| func (t *Transport) CancelRequest(req *Request) { |
| t.reqMu.Lock() |
| cancel := t.reqCanceler[req] |
| delete(t.reqCanceler, req) |
| t.reqMu.Unlock() |
| if cancel != nil { |
| cancel() |
| } |
| } |
| |
// Cached lookups of the proxy-related environment variables. The
// uppercase name is consulted before the lowercase one (envOnce.init
// takes the first non-empty value in order).
var (
	httpProxyEnv = &envOnce{
		names: []string{"HTTP_PROXY", "http_proxy"},
	}
	httpsProxyEnv = &envOnce{
		names: []string{"HTTPS_PROXY", "https_proxy"},
	}
	noProxyEnv = &envOnce{
		names: []string{"NO_PROXY", "no_proxy"},
	}
)
| |
// envOnce looks up an environment variable (optionally by multiple
// names) once. It mitigates expensive lookups on some platforms
// (e.g. Windows).
type envOnce struct {
	names []string
	once  sync.Once
	val   string
}

// Get returns the cached value, performing the lookup on first call.
func (e *envOnce) Get() string {
	e.once.Do(e.init)
	return e.val
}

// init stores the first non-empty value among e.names, in order.
func (e *envOnce) init() {
	for _, name := range e.names {
		if v := os.Getenv(name); v != "" {
			e.val = v
			return
		}
	}
}

// reset is used by tests
func (e *envOnce) reset() {
	*e = envOnce{names: e.names}
}
| |
| func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) { |
| cm.targetScheme = treq.URL.Scheme |
| cm.targetAddr = canonicalAddr(treq.URL) |
| if t.Proxy != nil { |
| cm.proxyURL, err = t.Proxy(treq.Request) |
| } |
| return cm, err |
| } |
| |
| // proxyAuth returns the Proxy-Authorization header to set |
| // on requests, if applicable. |
| func (cm *connectMethod) proxyAuth() string { |
| if cm.proxyURL == nil { |
| return "" |
| } |
| if u := cm.proxyURL.User; u != nil { |
| username := u.Username() |
| password, _ := u.Password() |
| return "Basic " + basicAuth(username, password) |
| } |
| return "" |
| } |
| |
// putIdleConn adds pconn to the list of idle persistent connections awaiting
// a new request.
// If pconn is no longer needed or not in a good state, putIdleConn
// returns false.
func (t *Transport) putIdleConn(pconn *persistConn) bool {
	if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
		pconn.close()
		return false
	}
	if pconn.isBroken() {
		return false
	}
	key := pconn.cacheKey
	max := t.MaxIdleConnsPerHost
	if max == 0 {
		max = DefaultMaxIdleConnsPerHost
	}
	t.idleMu.Lock()

	// First try to hand the connection directly to a getConn caller
	// blocked on this key's channel (see getIdleConnCh).
	waitingDialer := t.idleConnCh[key]
	select {
	case waitingDialer <- pconn:
		// We're done with this pconn; the waiter owns it now.
		t.idleMu.Unlock()
		return true
	default:
		if waitingDialer != nil {
			// The waiter registered the channel but its own dial won
			// the race; clean up the stale map entry.
			delete(t.idleConnCh, key)
		}
	}
	if t.wantIdle {
		// wantIdle is set by CloseIdleConnections: do not pool.
		t.idleMu.Unlock()
		pconn.close()
		return false
	}
	if t.idleConn == nil {
		t.idleConn = make(map[connectMethodKey][]*persistConn)
	}
	if len(t.idleConn[key]) >= max {
		// Per-host idle limit reached; drop this one.
		t.idleMu.Unlock()
		pconn.close()
		return false
	}
	// Sanity check against double-pooling the same connection.
	for _, exist := range t.idleConn[key] {
		if exist == pconn {
			log.Fatalf("dup idle pconn %p in freelist", pconn)
		}
	}
	t.idleConn[key] = append(t.idleConn[key], pconn)
	t.idleMu.Unlock()
	return true
}
| |
| // getIdleConnCh returns a channel to receive and return idle |
| // persistent connection for the given connectMethod. |
| // It may return nil, if persistent connections are not being used. |
| func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn { |
| if t.DisableKeepAlives { |
| return nil |
| } |
| key := cm.key() |
| t.idleMu.Lock() |
| defer t.idleMu.Unlock() |
| t.wantIdle = false |
| if t.idleConnCh == nil { |
| t.idleConnCh = make(map[connectMethodKey]chan *persistConn) |
| } |
| ch, ok := t.idleConnCh[key] |
| if !ok { |
| ch = make(chan *persistConn) |
| t.idleConnCh[key] = ch |
| } |
| return ch |
| } |
| |
// getIdleConn removes and returns an idle, non-broken connection for
// cm, or nil if none is available. Broken connections encountered
// along the way are discarded from the pool.
func (t *Transport) getIdleConn(cm connectMethod) (pconn *persistConn) {
	key := cm.key()
	t.idleMu.Lock()
	defer t.idleMu.Unlock()
	if t.idleConn == nil {
		return nil
	}
	for {
		pconns, ok := t.idleConn[key]
		if !ok {
			return nil
		}
		if len(pconns) == 1 {
			pconn = pconns[0]
			delete(t.idleConn, key)
		} else {
			// 2 or more cached connections; pop the most recently added.
			pconn = pconns[len(pconns)-1]
			t.idleConn[key] = pconns[:len(pconns)-1]
		}
		// Naked return hands back the popped conn; otherwise loop and
		// try the next candidate.
		if !pconn.isBroken() {
			return
		}
	}
}
| |
| func (t *Transport) setReqCanceler(r *Request, fn func()) { |
| t.reqMu.Lock() |
| defer t.reqMu.Unlock() |
| if t.reqCanceler == nil { |
| t.reqCanceler = make(map[*Request]func()) |
| } |
| if fn != nil { |
| t.reqCanceler[r] = fn |
| } else { |
| delete(t.reqCanceler, r) |
| } |
| } |
| |
| // replaceReqCanceler replaces an existing cancel function. If there is no cancel function |
| // for the request, we don't set the function and return false. |
| // Since CancelRequest will clear the canceler, we can use the return value to detect if |
| // the request was canceled since the last setReqCancel call. |
| func (t *Transport) replaceReqCanceler(r *Request, fn func()) bool { |
| t.reqMu.Lock() |
| defer t.reqMu.Unlock() |
| _, ok := t.reqCanceler[r] |
| if !ok { |
| return false |
| } |
| if fn != nil { |
| t.reqCanceler[r] = fn |
| } else { |
| delete(t.reqCanceler, r) |
| } |
| return true |
| } |
| |
| func (t *Transport) dial(network, addr string) (c net.Conn, err error) { |
| if t.Dial != nil { |
| return t.Dial(network, addr) |
| } |
| return net.Dial(network, addr) |
| } |
| |
// Testing hooks: optional callbacks run before and after a pending
// dial is handed off (see handlePendingDial in getConn). Nil except
// under test.
var prePendingDial, postPendingDial func()
| |
// getConn dials and creates a new persistConn to the target as
// specified in the connectMethod. This includes doing a proxy CONNECT
// and/or setting up TLS. If this doesn't return an error, the persistConn
// is ready to write requests to.
func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error) {
	if pc := t.getIdleConn(cm); pc != nil {
		// Register a no-op canceler: no dial is in flight to
		// interrupt, but CancelRequest must still find an entry.
		t.setReqCanceler(req, func() {})
		return pc, nil
	}

	type dialRes struct {
		pc  *persistConn
		err error
	}
	dialc := make(chan dialRes)

	// Snapshot the test hooks so this dial uses a consistent pair
	// even if the package-level vars are reassigned meanwhile.
	prePendingDial := prePendingDial
	postPendingDial := postPendingDial

	// handlePendingDial is called when the dial we started is no
	// longer wanted (an idle conn arrived first, or the request was
	// canceled). It drains the dial result in the background and, on
	// success, parks the new connection in the idle pool.
	handlePendingDial := func() {
		if prePendingDial != nil {
			prePendingDial()
		}
		go func() {
			if v := <-dialc; v.err == nil {
				t.putIdleConn(v.pc)
			}
			if postPendingDial != nil {
				postPendingDial()
			}
		}()
	}

	cancelc := make(chan struct{})
	t.setReqCanceler(req, func() { close(cancelc) })

	// Start the dial concurrently so an idle conn can still win the race.
	go func() {
		pc, err := t.dialConn(cm)
		dialc <- dialRes{pc, err}
	}()

	idleConnCh := t.getIdleConnCh(cm)
	select {
	case v := <-dialc:
		// Our dial finished first (or errored).
		return v.pc, v.err
	case pc := <-idleConnCh:
		// Another request finished first and its connection became
		// idle before our dial completed; use it.
		handlePendingDial()
		return pc, nil
	case <-req.Cancel:
		handlePendingDial()
		return nil, errors.New("net/http: request canceled while waiting for connection")
	case <-cancelc:
		handlePendingDial()
		return nil, errors.New("net/http: request canceled while waiting for connection")
	}
}
| |
| func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) { |
| pconn := &persistConn{ |
| t: t, |
| cacheKey: cm.key(), |
| reqch: make(chan requestAndChan, 1), |
| writech: make(chan writeRequest, 1), |
| closech: make(chan struct{}), |
| writeErrCh: make(chan error, 1), |
| } |
| tlsDial := t.DialTLS != nil && cm.targetScheme == "https" && cm.proxyURL == nil |
| if tlsDial { |
| var err error |
| pconn.conn, err = t.DialTLS("tcp", cm.addr()) |
| if err != nil { |
| return nil, err |
| } |
| if tc, ok := pconn.conn.(*tls.Conn); ok { |
| cs := tc.ConnectionState() |
| pconn.tlsState = &cs |
| } |
| } else { |
| conn, err := t.dial("tcp", cm.addr()) |
| if err != nil { |
| if cm.proxyURL != nil { |
| err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) |
| } |
| return nil, err |
| } |
| pconn.conn = conn |
| } |
| |
| switch { |
| case cm.proxyURL == nil: |
| |
| case cm.targetScheme == "http": |
| pconn.isProxy = true |
| if pa := cm.proxyAuth(); pa != "" { |
| pconn.mutateHeaderFunc = func(h Header) { |
| h.Set("Proxy-Authorization", pa) |
| } |
| } |
| case cm.targetScheme == "https": |
| conn := pconn.conn |
| connectReq := &Request{ |
| Method: "CONNECT", |
| URL: &url.URL{Opaque: cm.targetAddr}, |
| Host: cm.targetAddr, |
| Header: make(Header), |
| } |
| if pa := cm.proxyAuth(); pa != "" { |
| connectReq.Header.Set("Proxy-Authorization", pa) |
| } |
| connectReq.Write(conn) |
| |
| br := bufio.NewReader(conn) |
| resp, err := ReadResponse(br, connectReq) |
| if err != nil { |
| conn.Close() |
| return nil, err |
| } |
| if resp.StatusCode != 200 { |
| f := strings.SplitN(resp.Status, " ", 2) |
| conn.Close() |
| return nil, errors.New(f[1]) |
| } |
| } |
| |
| if cm.targetScheme == "https" && !tlsDial { |
| |
| cfg := cloneTLSClientConfig(t.TLSClientConfig) |
| if cfg.ServerName == "" { |
| cfg.ServerName = cm.tlsHost() |
| } |
| plainConn := pconn.conn |
| tlsConn := tls.Client(plainConn, cfg) |
| errc := make(chan error, 2) |
| var timer *time.Timer // for canceling TLS handshake |
| if d := t.TLSHandshakeTimeout; d != 0 { |
| timer = time.AfterFunc(d, func() { |
| errc <- tlsHandshakeTimeoutError{} |
| }) |
| } |
| go func() { |
| err := tlsConn.Handshake() |
| if timer != nil { |
| timer.Stop() |
| } |
| errc <- err |
| }() |
| if err := <-errc; err != nil { |
| plainConn.Close() |
| return nil, err |
| } |
| if !cfg.InsecureSkipVerify { |
| if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { |
| plainConn.Close() |
| return nil, err |
| } |
| } |
| cs := tlsConn.ConnectionState() |
| pconn.tlsState = &cs |
| pconn.conn = tlsConn |
| } |
| |
| pconn.br = bufio.NewReader(noteEOFReader{pconn.conn, &pconn.sawEOF}) |
| pconn.bw = bufio.NewWriter(pconn.conn) |
| go pconn.readLoop() |
| go pconn.writeLoop() |
| return pconn, nil |
| } |
| |
// useProxy reports whether requests to addr should use a proxy,
// according to the NO_PROXY or no_proxy environment variable.
// addr is always a canonicalAddr with a host and port.
func useProxy(addr string) bool {
	if len(addr) == 0 {
		return true
	}
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return false
	}
	// Never proxy localhost or loopback addresses.
	if host == "localhost" {
		return false
	}
	if ip := net.ParseIP(host); ip != nil {
		if ip.IsLoopback() {
			return false
		}
	}

	no_proxy := noProxyEnv.Get()
	if no_proxy == "*" {
		// Wildcard disables proxying entirely.
		return false
	}

	// Normalize addr and strip its port before comparing against the
	// comma-separated NO_PROXY patterns.
	addr = strings.ToLower(strings.TrimSpace(addr))
	if hasPort(addr) {
		addr = addr[:strings.LastIndex(addr, ":")]
	}

	for _, p := range strings.Split(no_proxy, ",") {
		p = strings.ToLower(strings.TrimSpace(p))
		if len(p) == 0 {
			continue
		}
		if hasPort(p) {
			p = p[:strings.LastIndex(p, ":")]
		}
		if addr == p {
			return false
		}
		if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) {
			// no_proxy ".foo.com" matches "bar.foo.com" or "foo.com"
			return false
		}
		if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' {
			// no_proxy "foo.com" matches "bar.foo.com"
			return false
		}
	}
	return true
}
| |
// connectMethod is the map key (in its String form) for keeping persistent
// TCP connections alive for subsequent HTTP requests.
//
// A connect method may be of the following types:
//
// Cache key form           Description
// -----------------        -------------------------
// |http|foo.com            http directly to server, no proxy
// |https|foo.com           https directly to server, no proxy
// http://proxy.com|https|foo.com  http to proxy, then CONNECT to foo.com
// http://proxy.com|http           http to proxy, http to anywhere after that
//
// Note: no support to https to the proxy yet.
//
// The key method below converts a connectMethod into its
// connectMethodKey cache-key form.
type connectMethod struct {
	proxyURL     *url.URL // nil for no proxy, else full proxy URL
	targetScheme string   // "http" or "https"
	targetAddr   string   // Not used if proxy + http targetScheme (4th example in table)
}
| |
| func (cm *connectMethod) key() connectMethodKey { |
| proxyStr := "" |
| targetAddr := cm.targetAddr |
| if cm.proxyURL != nil { |
| proxyStr = cm.proxyURL.String() |
| if cm.targetScheme == "http" { |
| targetAddr = "" |
| } |
| } |
| return connectMethodKey{ |
| proxy: proxyStr, |
| scheme: cm.targetScheme, |
| addr: targetAddr, |
| } |
| } |
| |
| // addr returns the first hop "host:port" to which we need to TCP connect. |
| func (cm *connectMethod) addr() string { |
| if cm.proxyURL != nil { |
| return canonicalAddr(cm.proxyURL) |
| } |
| return cm.targetAddr |
| } |
| |
| // tlsHost returns the host name to match against the peer's |
| // TLS certificate. |
| func (cm *connectMethod) tlsHost() string { |
| h := cm.targetAddr |
| if hasPort(h) { |
| h = h[:strings.LastIndex(h, ":")] |
| } |
| return h |
| } |
| |
// connectMethodKey is the map key version of connectMethod, with a
// stringified proxy URL (or the empty string) instead of a pointer to
// a URL.
type connectMethodKey struct {
	proxy, scheme, addr string
}

// String renders the key in its "proxy|scheme|addr" cache form.
func (k connectMethodKey) String() string {
	return k.proxy + "|" + k.scheme + "|" + k.addr
}
| |
// persistConn wraps a connection, usually a persistent one
// (but may be used for non-keep-alive requests as well)
type persistConn struct {
	t        *Transport
	cacheKey connectMethodKey
	conn     net.Conn
	tlsState *tls.ConnectionState
	br       *bufio.Reader       // from conn
	sawEOF   bool                // whether we've seen EOF from conn; owned by readLoop
	bw       *bufio.Writer       // to conn
	reqch    chan requestAndChan // written by roundTrip; read by readLoop
	writech  chan writeRequest   // written by roundTrip; read by writeLoop
	closech  chan struct{}       // closed when conn closed
	isProxy  bool                // whether this conn goes through an http proxy (set in dialConn)
	// writeErrCh passes the request write error (usually nil)
	// from the writeLoop goroutine to the readLoop which passes
	// it off to the res.Body reader, which then uses it to decide
	// whether or not a connection can be reused. Issue 7569.
	writeErrCh chan error

	lk                   sync.Mutex // guards following fields
	numExpectedResponses int
	closed               bool // whether conn has been closed
	broken               bool // an error has happened on this connection; marked broken so it's not reused.
	canceled             bool // whether this conn was broken due a CancelRequest
	// mutateHeaderFunc is an optional func to modify extra
	// headers on each outbound request before it's written. (the
	// original Request given to RoundTrip is not modified)
	mutateHeaderFunc func(Header)
}
| |
| // isBroken reports whether this connection is in a known broken state. |
| func (pc *persistConn) isBroken() bool { |
| pc.lk.Lock() |
| b := pc.broken |
| pc.lk.Unlock() |
| return b |
| } |
| |
| // isCanceled reports whether this connection was closed due to CancelRequest. |
| func (pc *persistConn) isCanceled() bool { |
| pc.lk.Lock() |
| defer pc.lk.Unlock() |
| return pc.canceled |
| } |
| |
| func (pc *persistConn) cancelRequest() { |
| pc.lk.Lock() |
| defer pc.lk.Unlock() |
| pc.canceled = true |
| pc.closeLocked() |
| } |
| |
// readLoop runs in its own goroutine, reading responses off the
// connection and handing them to the roundTrip call waiting on
// pc.reqch. It exits (closing the conn) once the connection is no
// longer usable for another request.
func (pc *persistConn) readLoop() {
	// eofc lets the response body's EOF callback block until readLoop
	// has decided whether to recycle the connection; it is closed
	// (unblocking everyone) when readLoop exits.
	eofc := make(chan struct{})
	defer close(eofc)

	// Read this hook once, before the loop starts, under testHookMu.
	testHookMu.Lock()
	testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead
	testHookMu.Unlock()

	alive := true
	for alive {
		// Block until the server sends something (or EOF/error).
		pb, err := pc.br.Peek(1)

		pc.lk.Lock()
		if pc.numExpectedResponses == 0 {
			// Data (or an error) arrived with no request outstanding:
			// the server is misbehaving or closed on us. Shut down.
			if !pc.closed {
				pc.closeLocked()
				if len(pb) > 0 {
					log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v",
						string(pb), err)
				}
			}
			pc.lk.Unlock()
			return
		}
		pc.lk.Unlock()

		rc := <-pc.reqch

		var resp *Response
		if err == nil {
			resp, err = ReadResponse(pc.br, rc.req)
			if err == nil && resp.StatusCode == 100 {
				// 100-continue interim response; read the real one.
				resp, err = ReadResponse(pc.br, rc.req)
			}
		}

		if resp != nil {
			resp.TLS = pc.tlsState
		}

		hasBody := resp != nil && rc.req.Method != "HEAD" && resp.ContentLength != 0

		if err != nil {
			pc.close()
		} else {
			if rc.addedGzip && hasBody && resp.Header.Get("Content-Encoding") == "gzip" {
				// We added the Accept-Encoding: gzip header, so
				// transparently decompress and hide that from the caller.
				resp.Header.Del("Content-Encoding")
				resp.Header.Del("Content-Length")
				resp.ContentLength = -1
				resp.Body = &gzipReader{body: resp.Body}
			}
			resp.Body = &bodyEOFSignal{body: resp.Body}
		}

		if err != nil || resp.Close || rc.req.Close || resp.StatusCode <= 199 {
			// Don't do keep-alive on error, if either side requested a
			// close, or on a 1xx response (100 was already consumed above).
			alive = false
		}

		var waitForBodyRead chan bool // channel is nil when there's no body
		if hasBody {
			waitForBodyRead = make(chan bool, 2)
			resp.Body.(*bodyEOFSignal).earlyCloseFn = func() error {
				// Body closed before EOF: the conn can't be reused.
				waitForBodyRead <- false
				return nil
			}
			resp.Body.(*bodyEOFSignal).fn = func(err error) error {
				isEOF := err == io.EOF
				waitForBodyRead <- isEOF
				if isEOF {
					// Block until readLoop has made its reuse decision.
					<-eofc
				} else if err != nil && pc.isCanceled() {
					return errRequestCanceled
				}
				return err
			}
		} else {
			// No body to wait for; unregister the request's canceler now.
			pc.t.setReqCanceler(rc.req, nil)
		}

		pc.lk.Lock()
		pc.numExpectedResponses--
		pc.lk.Unlock()

		rc.ch <- responseAndError{resp, err}

		if hasBody {
			// Wait for the body to be fully consumed (or closed, or the
			// request canceled) before peeking at the conn again.
			select {
			case <-rc.req.Cancel:
				alive = false
				pc.t.CancelRequest(rc.req)
			case bodyEOF := <-waitForBodyRead:
				pc.t.setReqCanceler(rc.req, nil)
				alive = alive &&
					bodyEOF &&
					!pc.sawEOF &&
					pc.wroteRequest() &&
					pc.t.putIdleConn(pc)
				if bodyEOF {
					eofc <- struct{}{}
				}
			case <-pc.closech:
				alive = false
			}
		} else {
			alive = alive &&
				!pc.sawEOF &&
				pc.wroteRequest() &&
				pc.t.putIdleConn(pc)
		}

		if hook := testHookReadLoopBeforeNextRead; hook != nil {
			hook()
		}
	}
	pc.close()
}
| |
// writeLoop runs in its own goroutine, writing one request at a time
// from pc.writech to the connection until the connection closes.
func (pc *persistConn) writeLoop() {
	for {
		select {
		case wr := <-pc.writech:
			if pc.isBroken() {
				wr.ch <- errors.New("http: can't write HTTP request on broken connection")
				continue
			}
			err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra)
			if err == nil {
				// Only flush when the write itself succeeded.
				err = pc.bw.Flush()
			}
			if err != nil {
				pc.markBroken()
				wr.req.Request.closeBody()
			}
			// Report the outcome both to readLoop (for the reuse
			// decision in wroteRequest) and to roundTrip (wr.ch).
			pc.writeErrCh <- err
			wr.ch <- err
		case <-pc.closech:
			return
		}
	}
}
| |
// wroteRequest is a check before recycling a connection that the previous write
// (from writeLoop above) happened and was successful.
func (pc *persistConn) wroteRequest() bool {
	select {
	case err := <-pc.writeErrCh:
		// Common case: the write completed before the response, so the
		// result is already buffered in writeErrCh.
		return err == nil
	default:
		// The write is still in flight; give it a short window to
		// finish before giving up on reusing the connection.
		select {
		case err := <-pc.writeErrCh:
			return err == nil
		case <-time.After(50 * time.Millisecond):
			return false
		}
	}
}
| |
// responseAndError is what readLoop sends back to roundTrip over the
// requestAndChan's ch: the parsed response, or the error reading it.
type responseAndError struct {
	res *Response
	err error
}
| |
// requestAndChan is sent by roundTrip to readLoop, pairing the request
// being written with the channel on which the response (or error)
// should be delivered.
type requestAndChan struct {
	req *Request
	ch  chan responseAndError

	// did the Transport (as opposed to the client code) add an
	// Accept-Encoding gzip header? only if it we set it do
	// we transparently decode the gzip.
	addedGzip bool
}
| |
// A writeRequest is sent by the readLoop's goroutine to the
// writeLoop's goroutine to write a request while the read loop
// concurrently waits on both the write response and the server's
// reply.
type writeRequest struct {
	req *transportRequest // the request plus any extra headers to write
	ch  chan<- error      // receives the write outcome (nil on success)
}
| |
// httpError is an error that additionally reports whether it was a
// timeout; it always reports itself as temporary (net.Error style).
type httpError struct {
	err     string
	timeout bool
}

func (e *httpError) Error() string   { return e.err }
func (e *httpError) Timeout() bool   { return e.timeout }
func (e *httpError) Temporary() bool { return true }
| |
// errTimeout is returned when ResponseHeaderTimeout elapses before
// response headers arrive (see persistConn.roundTrip).
var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}

// errClosed is returned when the connection closes before a response
// is received.
var errClosed error = &httpError{err: "net/http: transport closed before response was received"}

// errRequestCanceled is returned for requests canceled via CancelRequest.
var errRequestCanceled = errors.New("net/http: request canceled")
| |
// Testing hooks; all are nil except under test.
var (
	testHookPersistConnClosedGotRes func()
	testHookEnterRoundTrip          func()
	testHookMu                      sync.Locker = fakeLocker{} // guards following
	testHookReadLoopBeforeNextRead  func()
)
| |
// roundTrip writes req to the connection and waits for the matching
// response, coordinating with the conn's writeLoop and readLoop
// goroutines and honoring cancelation and ResponseHeaderTimeout.
func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
	if hook := testHookEnterRoundTrip; hook != nil {
		hook()
	}
	if !pc.t.replaceReqCanceler(req.Request, pc.cancelRequest) {
		// CancelRequest already cleared the canceler for this request:
		// return the conn to the pool and report cancelation.
		pc.t.putIdleConn(pc)
		return nil, errRequestCanceled
	}
	pc.lk.Lock()
	pc.numExpectedResponses++
	headerFn := pc.mutateHeaderFunc
	pc.lk.Unlock()

	if headerFn != nil {
		headerFn(req.extraHeaders())
	}

	// Ask for a gzipped response only when the caller has not set an
	// Accept-Encoding or Range header and this is not a HEAD request;
	// only then do we transparently decode (see readLoop).
	requestedGzip := false
	if !pc.t.DisableCompression &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD" {

		requestedGzip = true
		req.extraHeaders().Set("Accept-Encoding", "gzip")
	}

	if pc.t.DisableKeepAlives {
		req.extraHeaders().Set("Connection", "close")
	}

	// Hand the request to writeLoop and tell readLoop to expect a response.
	writeErrCh := make(chan error, 1)
	pc.writech <- writeRequest{req, writeErrCh}

	resc := make(chan responseAndError, 1)
	pc.reqch <- requestAndChan{req.Request, resc, requestedGzip}

	var re responseAndError
	var respHeaderTimer <-chan time.Time
	cancelChan := req.Request.Cancel
WaitResponse:
	for {
		select {
		case err := <-writeErrCh:
			if isNetWriteError(err) {
				// The server may have closed on us mid-write; prefer
				// any response (e.g. an error status) that raced in
				// over the raw network write error.
				select {
				case re = <-resc:
					pc.close()
					break WaitResponse
				case <-time.After(50 * time.Millisecond):
					// No response arrived; fall through to the write error.
				}
			}
			if err != nil {
				re = responseAndError{nil, err}
				pc.close()
				break WaitResponse
			}
			if d := pc.t.ResponseHeaderTimeout; d > 0 {
				// Write succeeded; start the response-header deadline.
				timer := time.NewTimer(d)
				defer timer.Stop()
				respHeaderTimer = timer.C
			}
		case <-pc.closech:
			// The connection closed; check whether a response raced in.
			select {
			case re = <-resc:
				if fn := testHookPersistConnClosedGotRes; fn != nil {
					fn()
				}
			default:
				re = responseAndError{err: errClosed}
				if pc.isCanceled() {
					re = responseAndError{err: errRequestCanceled}
				}
			}
			break WaitResponse
		case <-respHeaderTimer:
			pc.close()
			re = responseAndError{err: errTimeout}
			break WaitResponse
		case re = <-resc:
			break WaitResponse
		case <-cancelChan:
			pc.t.CancelRequest(req.Request)
			// Cancel at most once; a nil channel blocks forever.
			cancelChan = nil
		}
	}

	if re.err != nil {
		pc.t.setReqCanceler(req.Request, nil)
	}
	return re.res, re.err
}
| |
| // markBroken marks a connection as broken (so it's not reused). |
| // It differs from close in that it doesn't close the underlying |
| // connection for use when it's still being read. |
| func (pc *persistConn) markBroken() { |
| pc.lk.Lock() |
| defer pc.lk.Unlock() |
| pc.broken = true |
| } |
| |
// close closes the underlying connection and marks pc broken.
func (pc *persistConn) close() {
	pc.lk.Lock()
	defer pc.lk.Unlock()
	pc.closeLocked()
}

// closeLocked is close with pc.lk already held. It is idempotent:
// the conn is closed and closech signaled only on the first call.
func (pc *persistConn) closeLocked() {
	pc.broken = true
	if !pc.closed {
		pc.conn.Close()
		pc.closed = true
		close(pc.closech)
	}
	pc.mutateHeaderFunc = nil
}
| |
| var portMap = map[string]string{ |
| "http": "80", |
| "https": "443", |
| } |
| |
| // canonicalAddr returns url.Host but always with a ":port" suffix |
| func canonicalAddr(url *url.URL) string { |
| addr := url.Host |
| if !hasPort(addr) { |
| return addr + ":" + portMap[url.Scheme] |
| } |
| return addr |
| } |
| |
// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most
// once, right before its final (error-producing) Read or Close call
// returns. fn should return the new error to return from Read or Close.
//
// If earlyCloseFn is non-nil and Close is called before io.EOF is
// seen, earlyCloseFn is called instead of fn, and its return value is
// the return value from Close.
type bodyEOFSignal struct {
	body         io.ReadCloser
	mu           sync.Mutex        // guards following 4 fields
	closed       bool              // whether Close has been called
	rerr         error             // sticky Read error
	fn           func(error) error // err will be nil on Read io.EOF
	earlyCloseFn func() error      // optional alt Close func used if io.EOF not seen
}

// Read forwards to the wrapped body, latching the first error seen and
// running fn (via condfn) when that first error occurs.
func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
	es.mu.Lock()
	closed, rerr := es.closed, es.rerr
	es.mu.Unlock()
	if closed {
		return 0, errors.New("http: read on closed response body")
	}
	if rerr != nil {
		return 0, rerr
	}

	// Read outside the lock; only error bookkeeping needs it again.
	n, err = es.body.Read(p)
	if err != nil {
		es.mu.Lock()
		defer es.mu.Unlock()
		if es.rerr == nil {
			es.rerr = err
		}
		err = es.condfn(err)
	}
	return
}

// Close closes the wrapped body at most once, using earlyCloseFn when
// io.EOF has not yet been observed.
func (es *bodyEOFSignal) Close() error {
	es.mu.Lock()
	defer es.mu.Unlock()
	if es.closed {
		return nil
	}
	es.closed = true
	if es.earlyCloseFn != nil && es.rerr != io.EOF {
		return es.earlyCloseFn()
	}
	err := es.body.Close()
	return es.condfn(err)
}

// caller must hold es.mu.
func (es *bodyEOFSignal) condfn(err error) error {
	if es.fn == nil {
		return err
	}
	err = es.fn(err)
	es.fn = nil
	return err
}
| |
// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type gzipReader struct {
	body io.ReadCloser // underlying Response.Body
	zr   io.Reader     // lazily-initialized gzip reader
}

// Read creates the gzip reader on first use, then reads decompressed data.
func (gz *gzipReader) Read(p []byte) (n int, err error) {
	if gz.zr == nil {
		gz.zr, err = gzip.NewReader(gz.body)
		if err != nil {
			return 0, err
		}
	}
	return gz.zr.Read(p)
}

// Close closes only the underlying body (the gzip layer itself is not closed).
func (gz *gzipReader) Close() error {
	return gz.body.Close()
}
| |
// readerAndCloser joins an arbitrary Reader with an arbitrary Closer
// into a single ReadCloser via embedding.
type readerAndCloser struct {
	io.Reader
	io.Closer
}
| |
// tlsHandshakeTimeoutError is the error delivered when the TLS
// handshake does not finish within Transport.TLSHandshakeTimeout.
// It reports itself as both a timeout and temporary (net.Error style).
type tlsHandshakeTimeoutError struct{}

func (tlsHandshakeTimeoutError) Error() string   { return "net/http: TLS handshake timeout" }
func (tlsHandshakeTimeoutError) Timeout() bool   { return true }
func (tlsHandshakeTimeoutError) Temporary() bool { return true }
| |
// noteEOFReader wraps an io.Reader and sets *sawEOF when the
// underlying Read returns io.EOF.
type noteEOFReader struct {
	r      io.Reader
	sawEOF *bool // set to true on io.EOF; read by readLoop via persistConn.sawEOF
}

// Read forwards to the underlying reader, recording io.EOF.
func (nr noteEOFReader) Read(p []byte) (n int, err error) {
	n, err = nr.r.Read(p)
	if err == io.EOF {
		*nr.sawEOF = true
	}
	return
}
| |
// fakeLocker is a sync.Locker which does nothing. It's used to guard
// test-only fields when not under test, to avoid runtime atomic
// overhead.
type fakeLocker struct{}

// Lock is a no-op.
func (fakeLocker) Lock() {}

// Unlock is a no-op.
func (fakeLocker) Unlock() {}
| |
// isNetWriteError reports whether err is (or wraps, via *url.Error) a
// net.OpError from a "write" operation.
func isNetWriteError(err error) bool {
	if ue, ok := err.(*url.Error); ok {
		return isNetWriteError(ue.Err)
	}
	if oe, ok := err.(*net.OpError); ok {
		return oe.Op == "write"
	}
	return false
}
| |
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// The cfg must not be in active use by tls.Server, or else
// there can still be a race with tls.Server updating SessionTicketKey
// and our copying it, and also a race with the server setting
// SessionTicketsDisabled=false on failure to set the random
// ticket key.
//
// If cfg is nil, a new zero tls.Config is returned.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	c := new(tls.Config)
	if cfg == nil {
		return c
	}
	// Field-by-field assignment (never a struct copy) so the
	// unexported sync.Once/mutex state is left behind.
	c.Rand = cfg.Rand
	c.Time = cfg.Time
	c.Certificates = cfg.Certificates
	c.NameToCertificate = cfg.NameToCertificate
	c.GetCertificate = cfg.GetCertificate
	c.RootCAs = cfg.RootCAs
	c.NextProtos = cfg.NextProtos
	c.ServerName = cfg.ServerName
	c.ClientAuth = cfg.ClientAuth
	c.ClientCAs = cfg.ClientCAs
	c.InsecureSkipVerify = cfg.InsecureSkipVerify
	c.CipherSuites = cfg.CipherSuites
	c.PreferServerCipherSuites = cfg.PreferServerCipherSuites
	c.SessionTicketsDisabled = cfg.SessionTicketsDisabled
	c.SessionTicketKey = cfg.SessionTicketKey
	c.ClientSessionCache = cfg.ClientSessionCache
	c.MinVersion = cfg.MinVersion
	c.MaxVersion = cfg.MaxVersion
	c.CurvePreferences = cfg.CurvePreferences
	return c
}
| |
// cloneTLSClientConfig is like cloneTLSConfig but omits
// the fields SessionTicketsDisabled and SessionTicketKey.
// This makes it safe to call cloneTLSClientConfig on a config
// in active use by a server.
func cloneTLSClientConfig(cfg *tls.Config) *tls.Config {
	c := new(tls.Config)
	if cfg == nil {
		return c
	}
	// Field-by-field assignment; see cloneTLSConfig. The session
	// ticket fields are deliberately not copied.
	c.Rand = cfg.Rand
	c.Time = cfg.Time
	c.Certificates = cfg.Certificates
	c.NameToCertificate = cfg.NameToCertificate
	c.GetCertificate = cfg.GetCertificate
	c.RootCAs = cfg.RootCAs
	c.NextProtos = cfg.NextProtos
	c.ServerName = cfg.ServerName
	c.ClientAuth = cfg.ClientAuth
	c.ClientCAs = cfg.ClientCAs
	c.InsecureSkipVerify = cfg.InsecureSkipVerify
	c.CipherSuites = cfg.CipherSuites
	c.PreferServerCipherSuites = cfg.PreferServerCipherSuites
	c.ClientSessionCache = cfg.ClientSessionCache
	c.MinVersion = cfg.MinVersion
	c.MaxVersion = cfg.MaxVersion
	c.CurvePreferences = cfg.CurvePreferences
	return c
}
| ` |