| package main |
| |
| var src = ` |
| // Code generated by golang.org/x/tools/cmd/bundle command: |
| // $ bundle net/http http |
| |
| // Package http provides HTTP client and server implementations. |
| // |
| // Get, Head, Post, and PostForm make HTTP (or HTTPS) requests: |
| // |
| // resp, err := http.Get("http://example.com/") |
| // ... |
| // resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) |
| // ... |
| // resp, err := http.PostForm("http://example.com/form", |
| // url.Values{"key": {"Value"}, "id": {"123"}}) |
| // |
| // The client must close the response body when finished with it: |
| // |
| // resp, err := http.Get("http://example.com/") |
| // if err != nil { |
| // // handle error |
| // } |
| // defer resp.Body.Close() |
| // body, err := io.ReadAll(resp.Body) |
| // // ... |
| // |
| // For control over HTTP client headers, redirect policy, and other |
| // settings, create a Client: |
| // |
| // client := &http.Client{ |
| // CheckRedirect: redirectPolicyFunc, |
| // } |
| // |
| // resp, err := client.Get("http://example.com") |
| // // ... |
| // |
| // req, err := http.NewRequest("GET", "http://example.com", nil) |
| // // ... |
| // req.Header.Add("If-None-Match", ` + "`" + `W/"wyzzy"` + "`" + `) |
| // resp, err := client.Do(req) |
| // // ... |
| // |
| // For control over proxies, TLS configuration, keep-alives, |
| // compression, and other settings, create a Transport: |
| // |
| // tr := &http.Transport{ |
| // TLSClientConfig: &tls.Config{RootCAs: pool}, |
| // DisableCompression: true, |
| // } |
| // client := &http.Client{Transport: tr} |
| // resp, err := client.Get("https://example.com") |
| // |
| // Clients and Transports are safe for concurrent use by multiple |
| // goroutines and for efficiency should only be created once and re-used. |
| // |
| // ListenAndServe starts an HTTP server with a given address and handler. |
| // The handler is usually nil, which means to use DefaultServeMux. |
| // Handle and HandleFunc add handlers to DefaultServeMux: |
| // |
| // http.Handle("/foo", fooHandler) |
| // |
| // http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { |
| // fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) |
| // }) |
| // |
| // log.Fatal(http.ListenAndServe(":8080", nil)) |
| // |
| // More control over the server's behavior is available by creating a |
| // custom Server: |
| // |
| // s := &http.Server{ |
| // Addr: ":8080", |
| // Handler: myHandler, |
| // ReadTimeout: 10 * time.Second, |
| // WriteTimeout: 10 * time.Second, |
| // MaxHeaderBytes: 1 << 20, |
| // } |
| // log.Fatal(s.ListenAndServe()) |
| // |
| package http |
| |
| import ( |
| "bufio" |
| "bytes" |
| "compress/gzip" |
| "crypto/tls" |
| "encoding/base64" |
| "encoding/binary" |
| "errors" |
| "fmt" |
| "io" |
| "log" |
| "mime" |
| "mime/multipart" |
| "net" |
| "net/http/internal" |
| "net/textproto" |
| "net/url" |
| "os" |
| "path" |
| "path/filepath" |
| "runtime" |
| "sort" |
| "strconv" |
| "strings" |
| "sync" |
| "sync/atomic" |
| "time" |
| "unicode/utf8" |
| ) |
| |
| // A Client is an HTTP client. Its zero value (DefaultClient) is a |
| // usable client that uses DefaultTransport. |
| // |
| // The Client's Transport typically has internal state (cached TCP |
| // connections), so Clients should be reused instead of created as |
| // needed. Clients are safe for concurrent use by multiple goroutines. |
| // |
| // A Client is higher-level than a RoundTripper (such as Transport) |
| // and additionally handles HTTP details such as cookies and |
| // redirects. |
| type Client struct { |
| // Transport specifies the mechanism by which individual |
| // HTTP requests are made. |
| // If nil, DefaultTransport is used. |
| Transport RoundTripper |
| |
| // CheckRedirect specifies the policy for handling redirects. |
| // If CheckRedirect is not nil, the client calls it before |
| // following an HTTP redirect. The arguments req and via are |
| // the upcoming request and the requests made already, oldest |
| // first. If CheckRedirect returns an error, the Client's Get |
| // method returns both the previous Response and |
| // CheckRedirect's error (wrapped in a url.Error) instead of |
| // issuing the Request req. |
| // |
| // If CheckRedirect is nil, the Client uses its default policy, |
| // which is to stop after 10 consecutive requests. |
| CheckRedirect func(req *Request, via []*Request) error |
| |
| // Jar specifies the cookie jar. |
| // If Jar is nil, cookies are not sent in requests and ignored |
| // in responses. |
| Jar CookieJar |
| |
| // Timeout specifies a time limit for requests made by this |
| // Client. The timeout includes connection time, any |
| // redirects, and reading the response body. The timer remains |
| // running after Get, Head, Post, or Do return and will |
| // interrupt reading of the Response.Body. |
| // |
| // A Timeout of zero means no timeout. |
| // |
| // The Client's Transport must support the CancelRequest |
| // method or Client will return errors when attempting to make |
| // a request with Get, Head, Post, or Do. Client's default |
| // Transport (DefaultTransport) supports CancelRequest. |
| Timeout time.Duration |
| } |
| |
| // DefaultClient is the default Client and is used by Get, Head, and Post. |
| var DefaultClient = &Client{} |
| |
| // RoundTripper is an interface representing the ability to execute a |
| // single HTTP transaction, obtaining the Response for a given Request. |
| // |
| // A RoundTripper must be safe for concurrent use by multiple |
| // goroutines. |
| type RoundTripper interface { |
| // RoundTrip executes a single HTTP transaction, returning |
| // the Response for the request req. RoundTrip should not |
| // attempt to interpret the response. In particular, |
| // RoundTrip must return err == nil if it obtained a response, |
| // regardless of the response's HTTP status code. A non-nil |
| // err should be reserved for failure to obtain a response. |
| // Similarly, RoundTrip should not attempt to handle |
| // higher-level protocol details such as redirects, |
| // authentication, or cookies. |
| // |
| // RoundTrip should not modify the request, except for |
| // consuming and closing the Body, including on errors. The |
| // request's URL and Header fields are guaranteed to be |
| // initialized. |
| RoundTrip(*Request) (*Response, error) |
| } |
| |
// hasPort reports whether s — of the form "host", "host:port", or
// "[ipv6::address]:port" — includes a port. A colon inside a
// bracketed IPv6 literal does not count: the last ':' must come
// after the last ']'.
func hasPort(s string) bool {
	return strings.LastIndex(s, ":") > strings.LastIndex(s, "]")
}
| |
| // refererForURL returns a referer without any authentication info or |
| // an empty string if lastReq scheme is https and newReq scheme is http. |
| func refererForURL(lastReq, newReq *url.URL) string { |
| |
| if lastReq.Scheme == "https" && newReq.Scheme == "http" { |
| return "" |
| } |
| referer := lastReq.String() |
| if lastReq.User != nil { |
| |
| auth := lastReq.User.String() + "@" |
| referer = strings.Replace(referer, auth, "", 1) |
| } |
| return referer |
| } |
| |
// readClose is used in Send to implement io.ReadCloser by bundling
// together the bufio.Reader through which we read the response, and
// the underlying network connection: reads go through the buffered
// reader, Close releases the connection.
type readClose struct {
	io.Reader
	io.Closer
}
| |
| func (c *Client) send(req *Request) (*Response, error) { |
| if c.Jar != nil { |
| for _, cookie := range c.Jar.Cookies(req.URL) { |
| req.AddCookie(cookie) |
| } |
| } |
| resp, err := send(req, c.transport()) |
| if err != nil { |
| return nil, err |
| } |
| if c.Jar != nil { |
| if rc := resp.Cookies(); len(rc) > 0 { |
| c.Jar.SetCookies(req.URL, rc) |
| } |
| } |
| return resp, err |
| } |
| |
| // Do sends an HTTP request and returns an HTTP response, following |
| // policy (e.g. redirects, cookies, auth) as configured on the client. |
| // |
| // An error is returned if caused by client policy (such as |
| // CheckRedirect), or if there was an HTTP protocol error. |
| // A non-2xx response doesn't cause an error. |
| // |
| // When err is nil, resp always contains a non-nil resp.Body. |
| // |
| // Callers should close resp.Body when done reading from it. If |
| // resp.Body is not closed, the Client's underlying RoundTripper |
| // (typically Transport) may not be able to re-use a persistent TCP |
| // connection to the server for a subsequent "keep-alive" request. |
| // |
| // The request Body, if non-nil, will be closed by the underlying |
| // Transport, even on errors. |
| // |
| // Generally Get, Post, or PostForm will be used instead of Do. |
| func (c *Client) Do(req *Request) (resp *Response, err error) { |
| if req.Method == "GET" || req.Method == "HEAD" { |
| return c.doFollowingRedirects(req, shouldRedirectGet) |
| } |
| if req.Method == "POST" || req.Method == "PUT" { |
| return c.doFollowingRedirects(req, shouldRedirectPost) |
| } |
| return c.send(req) |
| } |
| |
| func (c *Client) transport() RoundTripper { |
| if c.Transport != nil { |
| return c.Transport |
| } |
| return DefaultTransport |
| } |
| |
| // send issues an HTTP request. |
| // Caller should close resp.Body when done reading from it. |
| func send(req *Request, t RoundTripper) (resp *Response, err error) { |
| if t == nil { |
| req.closeBody() |
| return nil, errors.New("http: no Client.Transport or DefaultTransport") |
| } |
| |
| if req.URL == nil { |
| req.closeBody() |
| return nil, errors.New("http: nil Request.URL") |
| } |
| |
| if req.RequestURI != "" { |
| req.closeBody() |
| return nil, errors.New("http: Request.RequestURI can't be set in client requests.") |
| } |
| |
| if req.Header == nil { |
| req.Header = make(Header) |
| } |
| |
| if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" { |
| username := u.Username() |
| password, _ := u.Password() |
| req.Header.Set("Authorization", "Basic "+basicAuth(username, password)) |
| } |
| resp, err = t.RoundTrip(req) |
| if err != nil { |
| if resp != nil { |
| log.Printf("RoundTripper returned a response & error; ignoring response") |
| } |
| return nil, err |
| } |
| return resp, nil |
| } |
| |
// basicAuth produces the credentials portion of an HTTP Basic
// Authorization header: base64("userid:password").
// See RFC 2617, section 2 (end of page 4): "To receive authorization,
// the client sends the userid and password, separated by a single
// colon (":") character, within a base64 encoded string in the
// credentials." It is not meant to be urlencoded.
func basicAuth(username, password string) string {
	return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}
| |
| // True if the specified HTTP status code is one for which the Get utility should |
| // automatically redirect. |
| func shouldRedirectGet(statusCode int) bool { |
| switch statusCode { |
| case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect: |
| return true |
| } |
| return false |
| } |
| |
| // True if the specified HTTP status code is one for which the Post utility should |
| // automatically redirect. |
| func shouldRedirectPost(statusCode int) bool { |
| switch statusCode { |
| case StatusFound, StatusSeeOther: |
| return true |
| } |
| return false |
| } |
| |
| // Get issues a GET to the specified URL. If the response is one of |
| // the following redirect codes, Get follows the redirect, up to a |
| // maximum of 10 redirects: |
| // |
| // 301 (Moved Permanently) |
| // 302 (Found) |
| // 303 (See Other) |
| // 307 (Temporary Redirect) |
| // |
| // An error is returned if there were too many redirects or if there |
| // was an HTTP protocol error. A non-2xx response doesn't cause an |
| // error. |
| // |
| // When err is nil, resp always contains a non-nil resp.Body. |
| // Caller should close resp.Body when done reading from it. |
| // |
| // Get is a wrapper around DefaultClient.Get. |
| // |
| // To make a request with custom headers, use NewRequest and |
| // DefaultClient.Do. |
| func Get(url string) (resp *Response, err error) { |
| return DefaultClient.Get(url) |
| } |
| |
| // Get issues a GET to the specified URL. If the response is one of the |
| // following redirect codes, Get follows the redirect after calling the |
| // Client's CheckRedirect function: |
| // |
| // 301 (Moved Permanently) |
| // 302 (Found) |
| // 303 (See Other) |
| // 307 (Temporary Redirect) |
| // |
| // An error is returned if the Client's CheckRedirect function fails |
| // or if there was an HTTP protocol error. A non-2xx response doesn't |
| // cause an error. |
| // |
| // When err is nil, resp always contains a non-nil resp.Body. |
| // Caller should close resp.Body when done reading from it. |
| // |
| // To make a request with custom headers, use NewRequest and Client.Do. |
| func (c *Client) Get(url string) (resp *Response, err error) { |
| req, err := NewRequest("GET", url, nil) |
| if err != nil { |
| return nil, err |
| } |
| return c.doFollowingRedirects(req, shouldRedirectGet) |
| } |
| |
// alwaysFalse is the no-op cancellation predicate used when no
// Client.Timeout is configured.
func alwaysFalse() bool {
	return false
}
| |
// doFollowingRedirects sends ireq and keeps following redirects for
// as long as shouldRedirect approves of the response status code,
// applying the client's CheckRedirect policy and Client.Timeout.
// When a timeout timer exists, the final response body is wrapped in
// a cancelTimerBody so the timer is stopped once the body is consumed.
func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) {
	var base *url.URL
	redirectChecker := c.CheckRedirect
	if redirectChecker == nil {
		redirectChecker = defaultCheckRedirect
	}
	var via []*Request

	if ireq.URL == nil {
		ireq.closeBody()
		return nil, errors.New("http: nil Request.URL")
	}

	var reqmu sync.Mutex // guards req
	req := ireq

	// If a Timeout is configured, arrange for the in-flight request
	// to be canceled when the timer fires. The transport must expose
	// a CancelRequest method for this to work.
	var timer *time.Timer
	var atomicWasCanceled int32 // atomic bool (1 or 0)
	var wasCanceled = alwaysFalse
	if c.Timeout > 0 {
		wasCanceled = func() bool { return atomic.LoadInt32(&atomicWasCanceled) != 0 }
		type canceler interface {
			CancelRequest(*Request)
		}
		tr, ok := c.transport().(canceler)
		if !ok {
			return nil, fmt.Errorf("net/http: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", c.transport())
		}
		timer = time.AfterFunc(c.Timeout, func() {
			atomic.StoreInt32(&atomicWasCanceled, 1)
			// Lock so we cancel whichever request is current when
			// the timer fires, not a stale one.
			reqmu.Lock()
			defer reqmu.Unlock()
			tr.CancelRequest(req)
		})
	}

	urlStr := "" // Location target on redirect iterations; last URL tried on exit
	redirectFailed := false
	for redirect := 0; ; redirect++ {
		if redirect != 0 {
			// Build the follow-up request. Redirected POST/PUT
			// requests are reissued as GET (the body is not resent).
			nreq := new(Request)
			nreq.Method = ireq.Method
			if ireq.Method == "POST" || ireq.Method == "PUT" {
				nreq.Method = "GET"
			}
			nreq.Header = make(Header)
			// Resolve the Location value relative to the previous URL.
			nreq.URL, err = base.Parse(urlStr)
			if err != nil {
				break
			}
			if len(via) > 0 {
				// Add a Referer header derived from the previous
				// request's URL (refererForURL strips credentials and
				// suppresses https->http downgrades).
				lastReq := via[len(via)-1]
				if ref := refererForURL(lastReq.URL, nreq.URL); ref != "" {
					nreq.Header.Set("Referer", ref)
				}

				// Let the redirect policy veto the hop.
				err = redirectChecker(nreq, via)
				if err != nil {
					redirectFailed = true
					break
				}
			}
			reqmu.Lock()
			req = nreq
			reqmu.Unlock()
		}

		urlStr = req.URL.String()
		if resp, err = c.send(req); err != nil {
			if wasCanceled() {
				err = &httpError{
					err: err.Error() + " (Client.Timeout exceeded while awaiting headers)",
					timeout: true,
				}
			}
			break
		}

		if shouldRedirect(resp.StatusCode) {
			// Read the body if small so underlying TCP connection will be re-used.
			// No need to check for errors: if it fails, Transport won't reuse it anyway.
			const maxBodySlurpSize = 2 << 10
			if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
				io.CopyN(io.Discard, resp.Body, maxBodySlurpSize)
			}
			resp.Body.Close()
			if urlStr = resp.Header.Get("Location"); urlStr == "" {
				err = fmt.Errorf("%d response missing Location header", resp.StatusCode)
				break
			}
			base = req.URL
			via = append(via, req)
			continue
		}
		// Not a redirect: this is the final response. Tie the timeout
		// timer to the body so it is stopped on EOF/Close.
		if timer != nil {
			resp.Body = &cancelTimerBody{
				t: timer,
				rc: resp.Body,
				reqWasCanceled: wasCanceled,
			}
		}
		return resp, nil
	}

	// We broke out of the loop with an error: wrap it in a *url.Error
	// whose Op is the title-cased request method (e.g. "Get").
	method := ireq.Method
	urlErr := &url.Error{
		Op: method[0:1] + strings.ToLower(method[1:]),
		URL: urlStr,
		Err: err,
	}

	if redirectFailed {
		// Special case for Go 1 compatibility: return both the response
		// and an error if the CheckRedirect function failed.
		// See https://golang.org/issue/3795
		return resp, urlErr
	}

	if resp != nil {
		resp.Body.Close()
	}
	return nil, urlErr
}
| |
| func defaultCheckRedirect(req *Request, via []*Request) error { |
| if len(via) >= 10 { |
| return errors.New("stopped after 10 redirects") |
| } |
| return nil |
| } |
| |
| // Post issues a POST to the specified URL. |
| // |
| // Caller should close resp.Body when done reading from it. |
| // |
| // If the provided body is an io.Closer, it is closed after the |
| // request. |
| // |
| // Post is a wrapper around DefaultClient.Post. |
| // |
| // To set custom headers, use NewRequest and DefaultClient.Do. |
| func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { |
| return DefaultClient.Post(url, bodyType, body) |
| } |
| |
| // Post issues a POST to the specified URL. |
| // |
| // Caller should close resp.Body when done reading from it. |
| // |
| // If the provided body is an io.Closer, it is closed after the |
| // request. |
| // |
| // To set custom headers, use NewRequest and Client.Do. |
| func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { |
| req, err := NewRequest("POST", url, body) |
| if err != nil { |
| return nil, err |
| } |
| req.Header.Set("Content-Type", bodyType) |
| return c.doFollowingRedirects(req, shouldRedirectPost) |
| } |
| |
| // PostForm issues a POST to the specified URL, with data's keys and |
| // values URL-encoded as the request body. |
| // |
| // The Content-Type header is set to application/x-www-form-urlencoded. |
| // To set other headers, use NewRequest and DefaultClient.Do. |
| // |
| // When err is nil, resp always contains a non-nil resp.Body. |
| // Caller should close resp.Body when done reading from it. |
| // |
| // PostForm is a wrapper around DefaultClient.PostForm. |
| func PostForm(url string, data url.Values) (resp *Response, err error) { |
| return DefaultClient.PostForm(url, data) |
| } |
| |
// PostForm issues a POST to the specified URL,
// with data's keys and values URL-encoded as the request body.
//
// The Content-Type header is set to application/x-www-form-urlencoded.
// To set other headers, use NewRequest and Client.Do.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
	return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
| |
// Head issues a HEAD to the specified URL. If the response is one of
// the following redirect codes, Head follows the redirect, up to a
// maximum of 10 redirects:
//
// 301 (Moved Permanently)
// 302 (Found)
// 303 (See Other)
// 307 (Temporary Redirect)
//
// Head is a wrapper around DefaultClient.Head.
func Head(url string) (resp *Response, err error) {
	return DefaultClient.Head(url)
}
| |
| // Head issues a HEAD to the specified URL. If the response is one of the |
| // following redirect codes, Head follows the redirect after calling the |
| // Client's CheckRedirect function: |
| // |
| // 301 (Moved Permanently) |
| // 302 (Found) |
| // 303 (See Other) |
| // 307 (Temporary Redirect) |
| func (c *Client) Head(url string) (resp *Response, err error) { |
| req, err := NewRequest("HEAD", url, nil) |
| if err != nil { |
| return nil, err |
| } |
| return c.doFollowingRedirects(req, shouldRedirectGet) |
| } |
| |
// cancelTimerBody is an io.ReadCloser that wraps rc with two features:
// 1) on Read EOF or Close, the timer t is Stopped,
// 2) On Read failure, if reqWasCanceled is true, the error is wrapped and
// marked as net.Error that hit its timeout.
type cancelTimerBody struct {
	t *time.Timer // the Client.Timeout timer
	rc io.ReadCloser // the underlying response body
	reqWasCanceled func() bool // reports whether the timeout fired
}

// Read forwards to the wrapped body, stopping the timeout timer at
// EOF and annotating read errors caused by the timeout.
func (b *cancelTimerBody) Read(p []byte) (n int, err error) {
	n, err = b.rc.Read(p)
	if err == io.EOF {
		// Body fully consumed; the request can no longer time out.
		b.t.Stop()
	} else if err != nil && b.reqWasCanceled() {
		return n, &httpError{
			err: err.Error() + " (Client.Timeout exceeded while reading body)",
			timeout: true,
		}
	}
	return
}

// Close closes the wrapped body and stops the timeout timer.
func (b *cancelTimerBody) Close() error {
	err := b.rc.Close()
	b.t.Stop()
	return err
}
| |
| // A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an |
| // HTTP response or the Cookie header of an HTTP request. |
| // |
| // See http://tools.ietf.org/html/rfc6265 for details. |
| type Cookie struct { |
| Name string |
| Value string |
| |
| Path string // optional |
| Domain string // optional |
| Expires time.Time // optional |
| RawExpires string // for reading cookies only |
| |
| // MaxAge=0 means no 'Max-Age' attribute specified. |
| // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' |
| // MaxAge>0 means Max-Age attribute present and given in seconds |
| MaxAge int |
| Secure bool |
| HttpOnly bool |
| Raw string |
| Unparsed []string // Raw text of unparsed attribute-value pairs |
| } |
| |
// readSetCookies parses all "Set-Cookie" values from
// the header h and returns the successfully parsed Cookies.
// Malformed lines are skipped; unrecognized or unparsable attributes
// are preserved verbatim in Cookie.Unparsed.
func readSetCookies(h Header) []*Cookie {
	cookies := []*Cookie{}
	for _, line := range h["Set-Cookie"] {
		// The first ';'-separated part is "name=value"; the rest
		// are attributes.
		parts := strings.Split(strings.TrimSpace(line), ";")
		if len(parts) == 1 && parts[0] == "" {
			continue
		}
		parts[0] = strings.TrimSpace(parts[0])
		j := strings.Index(parts[0], "=")
		if j < 0 {
			continue
		}
		name, value := parts[0][:j], parts[0][j+1:]
		if !isCookieNameValid(name) {
			continue
		}
		// true: a double-quoted value is allowed here.
		value, success := parseCookieValue(value, true)
		if !success {
			continue
		}
		c := &Cookie{
			Name: name,
			Value: value,
			Raw: line,
		}
		for i := 1; i < len(parts); i++ {
			parts[i] = strings.TrimSpace(parts[i])
			if len(parts[i]) == 0 {
				continue
			}
			// Split the attribute into name and optional value.
			attr, val := parts[i], ""
			if j := strings.Index(attr, "="); j >= 0 {
				attr, val = attr[:j], attr[j+1:]
			}
			lowerAttr := strings.ToLower(attr)
			val, success = parseCookieValue(val, false)
			if !success {
				c.Unparsed = append(c.Unparsed, parts[i])
				continue
			}
			switch lowerAttr {
			case "secure":
				c.Secure = true
				continue
			case "httponly":
				c.HttpOnly = true
				continue
			case "domain":
				c.Domain = val
				continue
			case "max-age":
				// Reject non-integer values and nonzero values with
				// a leading zero (e.g. "01").
				secs, err := strconv.Atoi(val)
				if err != nil || secs != 0 && val[0] == '0' {
					break
				}
				// Max-Age <= 0 means delete now, recorded as -1.
				if secs <= 0 {
					c.MaxAge = -1
				} else {
					c.MaxAge = secs
				}
				continue
			case "expires":
				c.RawExpires = val
				// Try RFC 1123 first, then the common dashed variant.
				exptime, err := time.Parse(time.RFC1123, val)
				if err != nil {
					exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
					if err != nil {
						c.Expires = time.Time{}
						break
					}
				}
				c.Expires = exptime.UTC()
				continue
			case "path":
				c.Path = val
				continue
			}
			// Unknown attribute: keep its raw text.
			c.Unparsed = append(c.Unparsed, parts[i])
		}
		cookies = append(cookies, c)
	}
	return cookies
}
| |
| // SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. |
| // The provided cookie must have a valid Name. Invalid cookies may be |
| // silently dropped. |
| func SetCookie(w ResponseWriter, cookie *Cookie) { |
| if v := cookie.String(); v != "" { |
| w.Header().Add("Set-Cookie", v) |
| } |
| } |
| |
// String returns the serialization of the cookie for use in a Cookie
// header (if only Name and Value are set) or a Set-Cookie response
// header (if other fields are set).
// If c is nil or c.Name is invalid, the empty string is returned.
func (c *Cookie) String() string {
	if c == nil || !isCookieNameValid(c.Name) {
		return ""
	}
	var b bytes.Buffer
	fmt.Fprintf(&b, "%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value))
	if len(c.Path) > 0 {
		fmt.Fprintf(&b, "; Path=%s", sanitizeCookiePath(c.Path))
	}
	if len(c.Domain) > 0 {
		if validCookieDomain(c.Domain) {
			// An invalid domain is dropped (below) rather than
			// sanitized; a valid one has any leading dot stripped
			// before being emitted.
			d := c.Domain
			if d[0] == '.' {
				d = d[1:]
			}
			fmt.Fprintf(&b, "; Domain=%s", d)
		} else {
			log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute",
				c.Domain)
		}
	}
	// The zero time and pre-1970 times are treated as "no Expires".
	if c.Expires.Unix() > 0 {
		fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(TimeFormat))
	}
	// Per the field docs: MaxAge>0 emits the value in seconds,
	// MaxAge<0 emits "Max-Age=0" (delete now), MaxAge==0 emits nothing.
	if c.MaxAge > 0 {
		fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
	} else if c.MaxAge < 0 {
		fmt.Fprintf(&b, "; Max-Age=0")
	}
	if c.HttpOnly {
		fmt.Fprintf(&b, "; HttpOnly")
	}
	if c.Secure {
		fmt.Fprintf(&b, "; Secure")
	}
	return b.String()
}
| |
// readCookies parses all "Cookie" values from the header h and
// returns the successfully parsed Cookies.
//
// if filter isn't empty, only cookies of that name are returned
func readCookies(h Header, filter string) []*Cookie {
	cookies := []*Cookie{}
	lines, ok := h["Cookie"]
	if !ok {
		return cookies
	}

	for _, line := range lines {
		// Pairs within a Cookie line are separated by semicolons.
		parts := strings.Split(strings.TrimSpace(line), ";")
		if len(parts) == 1 && parts[0] == "" {
			continue
		}

		parsedPairs := 0 // NOTE(review): counted but never read; kept for parity
		for i := 0; i < len(parts); i++ {
			parts[i] = strings.TrimSpace(parts[i])
			if len(parts[i]) == 0 {
				continue
			}
			// Split into name and optional value.
			name, val := parts[i], ""
			if j := strings.Index(name, "="); j >= 0 {
				name, val = name[:j], name[j+1:]
			}
			if !isCookieNameValid(name) {
				continue
			}
			if filter != "" && filter != name {
				continue
			}
			// true: a double-quoted value is allowed here.
			val, success := parseCookieValue(val, true)
			if !success {
				continue
			}
			cookies = append(cookies, &Cookie{Name: name, Value: val})
			parsedPairs++
		}
	}
	return cookies
}
| |
| // validCookieDomain returns whether v is a valid cookie domain-value. |
| func validCookieDomain(v string) bool { |
| if isCookieDomainName(v) { |
| return true |
| } |
| if net.ParseIP(v) != nil && !strings.Contains(v, ":") { |
| return true |
| } |
| return false |
| } |
| |
// isCookieDomainName reports whether s is a valid domain name or a
// valid domain name with a leading dot '.'. It is almost a direct
// copy of package net's isDomainName: labels of 1-63 bytes made of
// letters, digits and interior hyphens, with at least one letter
// somewhere in the name, and at most 255 bytes overall.
func isCookieDomainName(s string) bool {
	if len(s) == 0 || len(s) > 255 {
		return false
	}
	if s[0] == '.' {
		// A cookie a domain attribute may start with a leading dot.
		s = s[1:]
	}

	prev := byte('.') // previous byte; '.' lets a label start cleanly
	sawLetter := false
	labelLen := 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') {
			sawLetter = true
			labelLen++
		} else if '0' <= c && c <= '9' {
			// fine, but a purely numeric name is rejected at the end
			labelLen++
		} else if c == '-' {
			// A hyphen may not start a label.
			if prev == '.' {
				return false
			}
			labelLen++
		} else if c == '.' {
			// A dot may not follow a dot or a hyphen,
			// and the label just ended must be 1-63 bytes.
			if prev == '.' || prev == '-' {
				return false
			}
			if labelLen > 63 || labelLen == 0 {
				return false
			}
			labelLen = 0
		} else {
			return false
		}
		prev = c
	}
	if prev == '-' || labelLen > 63 {
		return false
	}
	return sawLetter
}
| |
// cookieNameSanitizer replaces CR and LF with '-' so a cookie name
// can never smuggle a header line break.
var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")

// sanitizeCookieName returns n with any newline or carriage-return
// bytes replaced by '-'.
func sanitizeCookieName(n string) string {
	return cookieNameSanitizer.Replace(n)
}
| |
// sanitizeCookieValue drops bytes not permitted in a cookie value and
// quotes the result when needed.
//
// http://tools.ietf.org/html/rfc6265#section-4.1.1
// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
// ; US-ASCII characters excluding CTLs,
// ; whitespace DQUOTE, comma, semicolon,
// ; and backslash
// We loosen this as spaces and commas are common in cookie values
// but we produce a quoted cookie-value when the value starts or ends
// with a comma or space.
// See https://golang.org/issue/7243 for the discussion.
func sanitizeCookieValue(v string) string {
	v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
	if len(v) == 0 {
		return v
	}
	// Quote values whose leading/trailing space or comma would
	// otherwise be trimmed or split by recipients.
	if v[0] == ' ' || v[0] == ',' || v[len(v)-1] == ' ' || v[len(v)-1] == ',' {
		return ` + "`" + `"` + "`" + ` + v + ` + "`" + `"` + "`" + `
	}
	return v
}
| |
// validCookieValueByte reports whether b may appear in a cookie
// value: printable ASCII excluding DQUOTE, semicolon, and backslash.
func validCookieValueByte(b byte) bool {
	if b < 0x20 || b >= 0x7f {
		return false // CTLs and non-ASCII
	}
	switch b {
	case '"', ';', '\\':
		return false
	}
	return true
}
| |
// sanitizeCookiePath drops any byte not allowed in a cookie Path
// attribute, logging once if anything was removed.
//
// path-av = "Path=" path-value
// path-value = <any CHAR except CTLs or ";">
func sanitizeCookiePath(v string) string {
	return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
}
| |
// validCookiePathByte reports whether b may appear in a cookie Path
// attribute: printable ASCII except semicolon.
func validCookiePathByte(b byte) bool {
	return b >= 0x20 && b < 0x7f && b != ';'
}
| |
// sanitizeOrWarn returns v unchanged when every byte satisfies valid;
// otherwise it logs one warning (naming fieldName and the first bad
// byte) and returns v with all invalid bytes removed.
func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
	// Find the first invalid byte; the common case is a clean string.
	i := 0
	for i < len(v) && valid(v[i]) {
		i++
	}
	if i == len(v) {
		return v
	}
	log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName)
	// Rebuild, keeping the already-verified prefix.
	buf := make([]byte, 0, len(v))
	buf = append(buf, v[:i]...)
	for ; i < len(v); i++ {
		if b := v[i]; valid(b) {
			buf = append(buf, b)
		}
	}
	return string(buf)
}
| |
| func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) { |
| |
| if allowDoubleQuote && len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' { |
| raw = raw[1 : len(raw)-1] |
| } |
| for i := 0; i < len(raw); i++ { |
| if !validCookieValueByte(raw[i]) { |
| return "", false |
| } |
| } |
| return raw, true |
| } |
| |
| func isCookieNameValid(raw string) bool { |
| if raw == "" { |
| return false |
| } |
| return strings.IndexFunc(raw, isNotToken) < 0 |
| } |
| |
| // fileTransport implements RoundTripper for the 'file' protocol. |
| type fileTransport struct { |
| fh fileHandler |
| } |
| |
| // NewFileTransport returns a new RoundTripper, serving the provided |
| // FileSystem. The returned RoundTripper ignores the URL host in its |
| // incoming requests, as well as most other properties of the |
| // request. |
| // |
| // The typical use case for NewFileTransport is to register the "file" |
| // protocol with a Transport, as in: |
| // |
| // t := &http.Transport{} |
| // t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/"))) |
| // c := &http.Client{Transport: t} |
| // res, err := c.Get("file:///etc/passwd") |
| // ... |
| func NewFileTransport(fs FileSystem) RoundTripper { |
| return fileTransport{fileHandler{fs}} |
| } |
| |
// RoundTrip implements RoundTripper by serving req with the wrapped
// fileHandler in a new goroutine. The response is returned as soon as
// it has been populated; its body may still be streaming through the
// pipe set up by newPopulateResponseWriter.
func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
	// ServeHTTP may block for a long time on a large file, so it runs
	// in its own goroutine; finish() guarantees a response is sent on
	// resc even if the handler never writes anything.
	rw, resc := newPopulateResponseWriter()
	go func() {
		t.fh.ServeHTTP(rw, req)
		rw.finish()
	}()
	return <-resc, nil
}
| |
// newPopulateResponseWriter returns a populateResponse whose response
// body is the read half of an in-memory pipe, along with the channel
// on which the populated *Response will be delivered exactly once.
func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
	pr, pw := io.Pipe()
	rw := &populateResponse{
		ch: make(chan *Response),
		pw: pw,
		res: &Response{
			Proto: "HTTP/1.0",
			ProtoMajor: 1,
			Header: make(Header),
			Close: true,
			Body: pr, // reader side; the handler writes into pw
		},
	}
	return rw, rw.ch
}
| |
| // populateResponse is a ResponseWriter that populates the *Response |
| // in res, and writes its body to a pipe connected to the response |
| // body. Once writes begin or finish() is called, the response is sent |
| // on ch. |
| type populateResponse struct { |
| res *Response |
| ch chan *Response |
| wroteHeader bool |
| hasContent bool |
| sentResponse bool |
| pw *io.PipeWriter |
| } |
| |
| func (pr *populateResponse) finish() { |
| if !pr.wroteHeader { |
| pr.WriteHeader(500) |
| } |
| if !pr.sentResponse { |
| pr.sendResponse() |
| } |
| pr.pw.Close() |
| } |
| |
| func (pr *populateResponse) sendResponse() { |
| if pr.sentResponse { |
| return |
| } |
| pr.sentResponse = true |
| |
| if pr.hasContent { |
| pr.res.ContentLength = -1 |
| } |
| pr.ch <- pr.res |
| } |
| |
| func (pr *populateResponse) Header() Header { |
| return pr.res.Header |
| } |
| |
// WriteHeader implements ResponseWriter. Only the first call has an
// effect; it records the status code and formats the status line.
func (pr *populateResponse) WriteHeader(code int) {
	if pr.wroteHeader {
		return
	}
	pr.wroteHeader = true

	pr.res.StatusCode = code
	pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
}
| |
// Write implements ResponseWriter. The first write implies a 200
// status if none was set, causes the response headers to be sent on
// the channel, and then streams p into the body pipe.
func (pr *populateResponse) Write(p []byte) (n int, err error) {
	if !pr.wroteHeader {
		pr.WriteHeader(StatusOK)
	}
	pr.hasContent = true
	if !pr.sentResponse {
		pr.sendResponse()
	}
	return pr.pw.Write(p) // pipe write: blocks until the body reader consumes p
}
| |
| // A Dir implements FileSystem using the native file system restricted to a |
| // specific directory tree. |
| // |
| // While the FileSystem.Open method takes '/'-separated paths, a Dir's string |
| // value is a filename on the native file system, not a URL, so it is separated |
| // by filepath.Separator, which isn't necessarily '/'. |
| // |
| // An empty Dir is treated as ".". |
| type Dir string |
| |
| func (d Dir) Open(name string) (File, error) { |
| if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || |
| strings.Contains(name, "\x00") { |
| return nil, errors.New("http: invalid character in file path") |
| } |
| dir := string(d) |
| if dir == "" { |
| dir = "." |
| } |
| f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) |
| if err != nil { |
| return nil, err |
| } |
| return f, nil |
| } |
| |
// A FileSystem implements access to a collection of named files.
// The elements in a file path are separated by slash ('/', U+002F)
// characters, regardless of host operating system convention.
type FileSystem interface {
	// Open opens the named file for reading.
	Open(name string) (File, error)
}
| |
// A File is returned by a FileSystem's Open method and can be
// served by the FileServer implementation.
//
// The methods should behave the same as those on an *os.File.
type File interface {
	io.Closer
	io.Reader
	// Readdir reads up to count directory entries, as on *os.File.
	Readdir(count int) ([]os.FileInfo, error)
	// Seek repositions the read offset, as on *os.File.
	Seek(offset int64, whence int) (int64, error)
	// Stat describes the file, as on *os.File.
	Stat() (os.FileInfo, error)
}
| |
| func dirList(w ResponseWriter, f File) { |
| w.Header().Set("Content-Type", "text/html; charset=utf-8") |
| fmt.Fprintf(w, "<pre>\n") |
| for { |
| dirs, err := f.Readdir(100) |
| if err != nil || len(dirs) == 0 { |
| break |
| } |
| for _, d := range dirs { |
| name := d.Name() |
| if d.IsDir() { |
| name += "/" |
| } |
| |
| url := url.URL{Path: name} |
| fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name)) |
| } |
| } |
| fmt.Fprintf(w, "</pre>\n") |
| } |
| |
| // ServeContent replies to the request using the content in the |
| // provided ReadSeeker. The main benefit of ServeContent over io.Copy |
| // is that it handles Range requests properly, sets the MIME type, and |
| // handles If-Modified-Since requests. |
| // |
| // If the response's Content-Type header is not set, ServeContent |
| // first tries to deduce the type from name's file extension and, |
| // if that fails, falls back to reading the first block of the content |
| // and passing it to DetectContentType. |
| // The name is otherwise unused; in particular it can be empty and is |
| // never sent in the response. |
| // |
| // If modtime is not the zero time or Unix epoch, ServeContent |
| // includes it in a Last-Modified header in the response. If the |
| // request includes an If-Modified-Since header, ServeContent uses |
| // modtime to decide whether the content needs to be sent at all. |
| // |
| // The content's Seek method must work: ServeContent uses |
| // a seek to the end of the content to determine its size. |
| // |
| // If the caller has set w's ETag header, ServeContent uses it to |
| // handle requests using If-Range and If-None-Match. |
| // |
| // Note that *os.File implements the io.ReadSeeker interface. |
| func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) { |
| sizeFunc := func() (int64, error) { |
| size, err := content.Seek(0, os.SEEK_END) |
| if err != nil { |
| return 0, errSeeker |
| } |
| _, err = content.Seek(0, os.SEEK_SET) |
| if err != nil { |
| return 0, errSeeker |
| } |
| return size, nil |
| } |
| serveContent(w, req, name, modtime, sizeFunc, content) |
| } |
| |
// errSeeker is returned by ServeContent's sizeFunc when the content
// doesn't seek properly. The underlying Seeker's error text isn't
// included in the sizeFunc reply so it's not sent over HTTP to end
// users; they only ever see this generic message.
var errSeeker = errors.New("seeker can't seek")
| |
// serveContent is the shared implementation behind ServeContent and
// serveFile.
//
// if name is empty, filename is unknown. (used for mime type, before sniffing)
// if modtime.IsZero(), modtime is unknown.
// content must be seeked to the beginning of the file.
// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response.
func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) {
	// Conditional-request handling: either check may fully answer the
	// request with a 304 Not Modified, in which case we are done.
	if checkLastModified(w, r, modtime) {
		return
	}
	rangeReq, done := checkETag(w, r, modtime)
	if done {
		return
	}

	code := StatusOK

	// If Content-Type was not set by the caller, deduce it from the
	// file extension, falling back to sniffing the first block.
	ctypes, haveType := w.Header()["Content-Type"]
	var ctype string
	if !haveType {
		ctype = mime.TypeByExtension(filepath.Ext(name))
		if ctype == "" {
			// read a chunk to decide between utf-8 text and binary
			var buf [sniffLen]byte
			n, _ := io.ReadFull(content, buf[:])
			ctype = DetectContentType(buf[:n])
			_, err := content.Seek(0, os.SEEK_SET)
			if err != nil {
				Error(w, "seeker can't seek", StatusInternalServerError)
				return
			}
		}
		w.Header().Set("Content-Type", ctype)
	} else if len(ctypes) > 0 {
		ctype = ctypes[0]
	}

	size, err := sizeFunc()
	if err != nil {
		Error(w, err.Error(), StatusInternalServerError)
		return
	}

	// handle Content-Range header.
	sendSize := size
	var sendContent io.Reader = content
	if size >= 0 {
		ranges, err := parseRange(rangeReq, size)
		if err != nil {
			Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
			return
		}
		if sumRangesSize(ranges) > size {
			// The total number of bytes in all the ranges is larger
			// than the size of the file by itself, so this is
			// probably an attack, or a dumb client. Ignore the range
			// request and serve the whole file.
			ranges = nil
		}
		switch {
		case len(ranges) == 1:
			// A single range is served directly, with a 206 status
			// and a Content-Range header describing the slice.
			ra := ranges[0]
			if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
				Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
				return
			}
			sendSize = ra.length
			code = StatusPartialContent
			w.Header().Set("Content-Range", ra.contentRange(size))
		case len(ranges) > 1:
			sendSize = rangesMIMESize(ranges, ctype, size)
			code = StatusPartialContent

			// Multiple ranges become a multipart/byteranges body.
			// A goroutine seeks/copies each part into the pipe while
			// the main flow streams the pipe's read half to the client.
			pr, pw := io.Pipe()
			mw := multipart.NewWriter(pw)
			w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
			sendContent = pr
			defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
			go func() {
				for _, ra := range ranges {
					part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
					if err != nil {
						pw.CloseWithError(err)
						return
					}
					if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil {
						pw.CloseWithError(err)
						return
					}
					if _, err := io.CopyN(part, content, ra.length); err != nil {
						pw.CloseWithError(err)
						return
					}
				}
				mw.Close()
				pw.Close()
			}()
		}

		w.Header().Set("Accept-Ranges", "bytes")
		if w.Header().Get("Content-Encoding") == "" {
			w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
		}
	}

	w.WriteHeader(code)

	if r.Method != "HEAD" {
		// Copy error is unreported: headers are already out the door.
		io.CopyN(w, sendContent, sendSize)
	}
}
| |
| var unixEpochTime = time.Unix(0, 0) |
| |
| // modtime is the modification time of the resource to be served, or IsZero(). |
| // return value is whether this request is now complete. |
| func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool { |
| if modtime.IsZero() || modtime.Equal(unixEpochTime) { |
| |
| return false |
| } |
| |
| if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) { |
| h := w.Header() |
| delete(h, "Content-Type") |
| delete(h, "Content-Length") |
| w.WriteHeader(StatusNotModified) |
| return true |
| } |
| w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat)) |
| return false |
| } |
| |
// checkETag implements If-None-Match and If-Range checks.
//
// The ETag or modtime must have been previously set in the
// ResponseWriter's headers. The modtime is only compared at second
// granularity and may be the zero value to mean unknown.
//
// The return value is the effective request "Range" header to use and
// whether this request is now considered done.
func checkETag(w ResponseWriter, r *Request, modtime time.Time) (rangeReq string, done bool) {
	etag := w.Header().get("Etag")
	rangeReq = r.Header.get("Range")

	if ir := r.Header.get("If-Range"); ir != "" && ir != etag {
		// The If-Range value did not match the ETag; fall back to
		// interpreting it as an HTTP date, matched against modtime at
		// second granularity. On mismatch, drop the Range request so
		// the full content is served.
		timeMatches := false
		if !modtime.IsZero() {
			if t, err := ParseTime(ir); err == nil && t.Unix() == modtime.Unix() {
				timeMatches = true
			}
		}
		if !timeMatches {
			rangeReq = ""
		}
	}

	if inm := r.Header.get("If-None-Match"); inm != "" {
		// An If-None-Match check can only be answered when an ETag
		// was set on the response.
		if etag == "" {
			return rangeReq, false
		}

		// Only GET and HEAD requests take the 304 path here.
		if r.Method != "GET" && r.Method != "HEAD" {
			return rangeReq, false
		}

		// A match on the exact ETag, or the wildcard "*", means the
		// client already has the current representation: answer
		// 304 Not Modified with body headers removed.
		if inm == etag || inm == "*" {
			h := w.Header()
			delete(h, "Content-Type")
			delete(h, "Content-Length")
			w.WriteHeader(StatusNotModified)
			return "", true
		}
	}
	return rangeReq, false
}
| |
| // name is '/'-separated, not filepath.Separator. |
| func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) { |
| const indexPage = "/index.html" |
| |
| if strings.HasSuffix(r.URL.Path, indexPage) { |
| localRedirect(w, r, "./") |
| return |
| } |
| |
| f, err := fs.Open(name) |
| if err != nil { |
| msg, code := toHTTPError(err) |
| Error(w, msg, code) |
| return |
| } |
| defer f.Close() |
| |
| d, err1 := f.Stat() |
| if err1 != nil { |
| msg, code := toHTTPError(err) |
| Error(w, msg, code) |
| return |
| } |
| |
| if redirect { |
| |
| url := r.URL.Path |
| if d.IsDir() { |
| if url[len(url)-1] != '/' { |
| localRedirect(w, r, path.Base(url)+"/") |
| return |
| } |
| } else { |
| if url[len(url)-1] == '/' { |
| localRedirect(w, r, "../"+path.Base(url)) |
| return |
| } |
| } |
| } |
| |
| if d.IsDir() { |
| index := strings.TrimSuffix(name, "/") + indexPage |
| ff, err := fs.Open(index) |
| if err == nil { |
| defer ff.Close() |
| dd, err := ff.Stat() |
| if err == nil { |
| name = index |
| d = dd |
| f = ff |
| } |
| } |
| } |
| |
| if d.IsDir() { |
| if checkLastModified(w, r, d.ModTime()) { |
| return |
| } |
| dirList(w, f) |
| return |
| } |
| |
| sizeFunc := func() (int64, error) { return d.Size(), nil } |
| serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f) |
| } |
| |
| // toHTTPError returns a non-specific HTTP error message and status code |
| // for a given non-nil error value. It's important that toHTTPError does not |
| // actually return err.Error(), since msg and httpStatus are returned to users, |
| // and historically Go's ServeContent always returned just "404 Not Found" for |
| // all errors. We don't want to start leaking information in error messages. |
| func toHTTPError(err error) (msg string, httpStatus int) { |
| if os.IsNotExist(err) { |
| return "404 page not found", StatusNotFound |
| } |
| if os.IsPermission(err) { |
| return "403 Forbidden", StatusForbidden |
| } |
| |
| return "500 Internal Server Error", StatusInternalServerError |
| } |
| |
| // localRedirect gives a Moved Permanently response. |
| // It does not convert relative paths to absolute paths like Redirect does. |
| func localRedirect(w ResponseWriter, r *Request, newPath string) { |
| if q := r.URL.RawQuery; q != "" { |
| newPath += "?" + q |
| } |
| w.Header().Set("Location", newPath) |
| w.WriteHeader(StatusMovedPermanently) |
| } |
| |
| // ServeFile replies to the request with the contents of the named |
| // file or directory. |
| // |
| // As a special case, ServeFile redirects any request where r.URL.Path |
| // ends in "/index.html" to the same path, without the final |
| // "index.html". To avoid such redirects either modify the path or |
| // use ServeContent. |
| func ServeFile(w ResponseWriter, r *Request, name string) { |
| dir, file := filepath.Split(name) |
| serveFile(w, r, Dir(dir), file, false) |
| } |
| |
// fileHandler is the Handler returned by FileServer; it serves
// requests from the FileSystem rooted at root.
type fileHandler struct {
	root FileSystem
}
| |
| // FileServer returns a handler that serves HTTP requests |
| // with the contents of the file system rooted at root. |
| // |
| // To use the operating system's file system implementation, |
| // use http.Dir: |
| // |
| // http.Handle("/", http.FileServer(http.Dir("/tmp"))) |
| // |
| // As a special case, the returned file server redirects any request |
| // ending in "/index.html" to the same path, without the final |
| // "index.html". |
| func FileServer(root FileSystem) Handler { |
| return &fileHandler{root} |
| } |
| |
| func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) { |
| upath := r.URL.Path |
| if !strings.HasPrefix(upath, "/") { |
| upath = "/" + upath |
| r.URL.Path = upath |
| } |
| serveFile(w, r, f.root, path.Clean(upath), true) |
| } |
| |
// httpRange specifies the byte range to be sent to the client.
type httpRange struct {
	start, length int64
}

// contentRange renders r as a Content-Range header value of the form
// "bytes first-last/size", where last is inclusive.
func (r httpRange) contentRange(size int64) string {
	last := r.start + r.length - 1
	return fmt.Sprintf("bytes %d-%d/%d", r.start, last, size)
}

// mimeHeader builds the per-part MIME header used in
// multipart/byteranges responses.
func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
	h := make(textproto.MIMEHeader, 2)
	h.Set("Content-Range", r.contentRange(size))
	h.Set("Content-Type", contentType)
	return h
}
| |
// parseRange parses a Range header string as per RFC 2616.
// It returns nil, nil when s is empty (no Range header present).
func parseRange(s string, size int64) ([]httpRange, error) {
	if s == "" {
		return nil, nil // header not present
	}
	const b = "bytes="
	if !strings.HasPrefix(s, b) {
		return nil, errors.New("invalid range")
	}
	var ranges []httpRange
	for _, ra := range strings.Split(s[len(b):], ",") {
		ra = strings.TrimSpace(ra)
		if ra == "" {
			continue
		}
		i := strings.Index(ra, "-")
		if i < 0 {
			return nil, errors.New("invalid range")
		}
		start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
		var r httpRange
		if start == "" {
			// If no start is specified, end specifies the range start
			// relative to the end of the file (a suffix range).
			i, err := strconv.ParseInt(end, 10, 64)
			if err != nil {
				return nil, errors.New("invalid range")
			}
			if i > size {
				i = size
			}
			r.start = size - i
			r.length = size - r.start
		} else {
			i, err := strconv.ParseInt(start, 10, 64)
			if err != nil || i >= size || i < 0 {
				return nil, errors.New("invalid range")
			}
			r.start = i
			if end == "" {
				// If no end is specified, range extends to end of the file.
				r.length = size - r.start
			} else {
				i, err := strconv.ParseInt(end, 10, 64)
				if err != nil || r.start > i {
					return nil, errors.New("invalid range")
				}
				if i >= size {
					// Clip a last-byte position past EOF to the final byte.
					i = size - 1
				}
				r.length = i - r.start + 1
			}
		}
		ranges = append(ranges, r)
	}
	return ranges, nil
}
| |
// countingWriter counts how many bytes have been written to it.
type countingWriter int64

// Write discards b, recording only its length.
func (w *countingWriter) Write(b []byte) (int, error) {
	*w += countingWriter(len(b))
	return len(b), nil
}
| |
| // rangesMIMESize returns the number of bytes it takes to encode the |
| // provided ranges as a multipart response. |
| func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) { |
| var w countingWriter |
| mw := multipart.NewWriter(&w) |
| for _, ra := range ranges { |
| mw.CreatePart(ra.mimeHeader(contentType, contentSize)) |
| encSize += ra.length |
| } |
| mw.Close() |
| encSize += int64(w) |
| return |
| } |
| |
| func sumRangesSize(ranges []httpRange) (size int64) { |
| for _, ra := range ranges { |
| size += ra.length |
| } |
| return |
| } |
| |
| var raceEnabled = false // set by race.go |
| |
// A Header represents the key-value pairs in an HTTP header.
// Its methods delegate to textproto.MIMEHeader, which canonicalizes
// keys via CanonicalMIMEHeaderKey.
type Header map[string][]string

// Add adds the key, value pair to the header.
// It appends to any existing values associated with key.
func (h Header) Add(key, value string) {
	textproto.MIMEHeader(h).Add(key, value)
}

// Set sets the header entries associated with key to the single
// element value, replacing any existing values.
func (h Header) Set(key, value string) {
	textproto.MIMEHeader(h).Set(key, value)
}

// Get gets the first value associated with the given key, or "" when
// the key has no values. To access multiple values of a key, access
// the map directly with CanonicalHeaderKey.
func (h Header) Get(key string) string {
	return textproto.MIMEHeader(h).Get(key)
}

// get is like Get, but key must already be in CanonicalHeaderKey form.
func (h Header) get(key string) string {
	if vv, ok := h[key]; ok && len(vv) > 0 {
		return vv[0]
	}
	return ""
}

// Del deletes the values associated with key.
func (h Header) Del(key string) {
	textproto.MIMEHeader(h).Del(key)
}
| |
// Write writes a header in wire format.
// It is equivalent to WriteSubset with no excluded keys.
func (h Header) Write(w io.Writer) error {
	return h.WriteSubset(w, nil)
}
| |
| func (h Header) clone() Header { |
| h2 := make(Header, len(h)) |
| for k, vv := range h { |
| vv2 := make([]string, len(vv)) |
| copy(vv2, vv) |
| h2[k] = vv2 |
| } |
| return h2 |
| } |
| |
| var timeFormats = []string{ |
| TimeFormat, |
| time.RFC850, |
| time.ANSIC, |
| } |
| |
| // ParseTime parses a time header (such as the Date: header), |
| // trying each of the three formats allowed by HTTP/1.1: |
| // TimeFormat, time.RFC850, and time.ANSIC. |
| func ParseTime(text string) (t time.Time, err error) { |
| for _, layout := range timeFormats { |
| t, err = time.Parse(layout, text) |
| if err == nil { |
| return |
| } |
| } |
| return |
| } |
| |
| var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") |
| |
// writeStringer is implemented by writers with a native WriteString
// method; WriteSubset uses it to skip the per-call []byte conversion
// that the stringWriter fallback performs.
type writeStringer interface {
	WriteString(string) (int, error)
}
| |
| // stringWriter implements WriteString on a Writer. |
| type stringWriter struct { |
| w io.Writer |
| } |
| |
| func (w stringWriter) WriteString(s string) (n int, err error) { |
| return w.w.Write([]byte(s)) |
| } |
| |
| type keyValues struct { |
| key string |
| values []string |
| } |
| |
| // A headerSorter implements sort.Interface by sorting a []keyValues |
| // by key. It's used as a pointer, so it can fit in a sort.Interface |
| // interface value without allocation. |
| type headerSorter struct { |
| kvs []keyValues |
| } |
| |
| func (s *headerSorter) Len() int { return len(s.kvs) } |
| |
| func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] } |
| |
| func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key } |
| |
| var headerSorterPool = sync.Pool{ |
| New: func() interface{} { return new(headerSorter) }, |
| } |
| |
| // sortedKeyValues returns h's keys sorted in the returned kvs |
| // slice. The headerSorter used to sort is also returned, for possible |
| // return to headerSorterCache. |
| func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) { |
| hs = headerSorterPool.Get().(*headerSorter) |
| if cap(hs.kvs) < len(h) { |
| hs.kvs = make([]keyValues, 0, len(h)) |
| } |
| kvs = hs.kvs[:0] |
| for k, vv := range h { |
| if !exclude[k] { |
| kvs = append(kvs, keyValues{k, vv}) |
| } |
| } |
| hs.kvs = kvs |
| sort.Sort(hs) |
| return kvs, hs |
| } |
| |
| // WriteSubset writes a header in wire format. |
| // If exclude is not nil, keys where exclude[key] == true are not written. |
| func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error { |
| ws, ok := w.(writeStringer) |
| if !ok { |
| ws = stringWriter{w} |
| } |
| kvs, sorter := h.sortedKeyValues(exclude) |
| for _, kv := range kvs { |
| for _, v := range kv.values { |
| v = headerNewlineToSpace.Replace(v) |
| v = textproto.TrimString(v) |
| for _, s := range []string{kv.key, ": ", v, "\r\n"} { |
| if _, err := ws.WriteString(s); err != nil { |
| return err |
| } |
| } |
| } |
| } |
| headerSorterPool.Put(sorter) |
| return nil |
| } |
| |
// CanonicalHeaderKey returns the canonical format of the
// header key s. The canonicalization converts the first
// letter and any letter following a hyphen to upper case; the
// rest are converted to lowercase. For example, the
// canonical key for "accept-encoding" is "Accept-Encoding".
// If s contains a space or invalid header field bytes, it is
// returned without modifications.
//
// The work is delegated to textproto.CanonicalMIMEHeaderKey.
func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
| |
// hasToken reports whether token appears within v, ASCII
// case-insensitive, with space or comma boundaries.
// token must be all lowercase.
// v may contain mixed cased.
func hasToken(v, token string) bool {
	if token == "" || len(token) > len(v) {
		return false
	}
	if v == token {
		return true
	}
	for sp := 0; sp+len(token) <= len(v); sp++ {
		// Cheap first-byte check; b|0x20 lowercases an ASCII letter.
		if b := v[sp]; b != token[0] && b|0x20 != token[0] {
			continue
		}
		// The candidate must begin on a token boundary...
		if sp > 0 && !isTokenBoundary(v[sp-1]) {
			continue
		}
		// ...and end on one (or at the end of v).
		if end := sp + len(token); end != len(v) && !isTokenBoundary(v[end]) {
			continue
		}
		if strings.EqualFold(v[sp:sp+len(token)], token) {
			return true
		}
	}
	return false
}

// isTokenBoundary reports whether b separates tokens in a header value.
func isTokenBoundary(b byte) bool {
	switch b {
	case ' ', ',', '\t':
		return true
	}
	return false
}
| |
// A CookieJar manages storage and use of cookies in HTTP requests.
//
// Implementations of CookieJar must be safe for concurrent use by multiple
// goroutines.
//
// The net/http/cookiejar package provides a CookieJar implementation.
type CookieJar interface {
	// SetCookies handles the receipt of the cookies in a reply for the
	// given URL. It may or may not choose to save the cookies, depending
	// on the jar's policy and implementation. (A jar that ignores all
	// cookies is valid.)
	SetCookies(u *url.URL, cookies []*Cookie)

	// Cookies returns the cookies to send in a request for the given URL.
	// It is up to the implementation to honor the standard cookie use
	// restrictions such as in RFC 6265.
	Cookies(u *url.URL) []*Cookie
}
| |
// isTokenTable marks the ASCII bytes valid in an HTTP token
// (RFC 7230 "tchar": letters, digits, and selected symbols).
// It is indexed by byte; values >= 127 are handled by isToken's
// bounds check.
var isTokenTable = [127]bool{
	'!':  true,
	'#':  true,
	'$':  true,
	'%':  true,
	'&':  true,
	'\'': true,
	'*':  true,
	'+':  true,
	'-':  true,
	'.':  true,
	'0':  true,
	'1':  true,
	'2':  true,
	'3':  true,
	'4':  true,
	'5':  true,
	'6':  true,
	'7':  true,
	'8':  true,
	'9':  true,
	'A':  true,
	'B':  true,
	'C':  true,
	'D':  true,
	'E':  true,
	'F':  true,
	'G':  true,
	'H':  true,
	'I':  true,
	'J':  true,
	'K':  true,
	'L':  true,
	'M':  true,
	'N':  true,
	'O':  true,
	'P':  true,
	'Q':  true,
	'R':  true,
	'S':  true,
	'T':  true,
	'U':  true,
	'W':  true,
	'V':  true,
	'X':  true,
	'Y':  true,
	'Z':  true,
	'^':  true,
	'_':  true,
	'` + "`" + `': true,
	'a':  true,
	'b':  true,
	'c':  true,
	'd':  true,
	'e':  true,
	'f':  true,
	'g':  true,
	'h':  true,
	'i':  true,
	'j':  true,
	'k':  true,
	'l':  true,
	'm':  true,
	'n':  true,
	'o':  true,
	'p':  true,
	'q':  true,
	'r':  true,
	's':  true,
	't':  true,
	'u':  true,
	'v':  true,
	'w':  true,
	'x':  true,
	'y':  true,
	'z':  true,
	'|':  true,
	'~':  true,
}
| |
| func isToken(r rune) bool { |
| i := int(r) |
| return i < len(isTokenTable) && isTokenTable[i] |
| } |
| |
| func isNotToken(r rune) bool { |
| return !isToken(r) |
| } |
| |
// headerValuesContainsToken reports whether any string in values
// contains the provided token, ASCII case-insensitively.
func headerValuesContainsToken(values []string, token string) bool {
	for _, val := range values {
		if headerValueContainsToken(val, token) {
			return true
		}
	}
	return false
}

// isOWS reports whether b is an optional whitespace byte, as defined
// by RFC 7230 section 3.2.3.
func isOWS(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}

// trimOWS returns x with all optional whitespace removed from the
// beginning and end.
func trimOWS(x string) string {
	start := 0
	for start < len(x) && isOWS(x[start]) {
		start++
	}
	end := len(x)
	for end > start && isOWS(x[end-1]) {
		end--
	}
	return x[start:end]
}

// headerValueContainsToken reports whether v (assumed to be a
// 0#element, in the ABNF extension described in RFC 7230 section 7)
// contains token amongst its comma-separated tokens, ASCII
// case-insensitively. This is the iterative form of the original
// comma-by-comma recursion.
func headerValueContainsToken(v string, token string) bool {
	for {
		v = trimOWS(v)
		comma := strings.IndexByte(v, ',')
		if comma < 0 {
			return tokenEqual(v, token)
		}
		if tokenEqual(trimOWS(v[:comma]), token) {
			return true
		}
		v = v[comma+1:]
	}
}

// lowerASCII returns the ASCII lowercase version of b.
func lowerASCII(b byte) byte {
	if b < 'A' || b > 'Z' {
		return b
	}
	return b + ('a' - 'A')
}

// tokenEqual reports whether t1 and t2 are equal, ASCII
// case-insensitively. Any non-ASCII byte makes the comparison fail,
// since tokens are ASCII-only.
func tokenEqual(t1, t2 string) bool {
	if len(t1) != len(t2) {
		return false
	}
	for i := 0; i < len(t1); i++ {
		c := t1[i]
		if c >= utf8.RuneSelf {
			return false
		}
		if lowerASCII(c) != lowerASCII(t2[i]) {
			return false
		}
	}
	return true
}
| |
const (
	// defaultMaxMemory is 32 MB — presumably the in-memory cap for
	// parsed multipart form data; confirm against ParseMultipartForm,
	// which is outside this chunk.
	defaultMaxMemory = 32 << 20 // 32 MB
)

// ErrMissingFile is returned by FormFile when the provided file field name
// is either not present in the request or not a file field.
var ErrMissingFile = errors.New("http: no such file")
| |
// ProtocolError represents an HTTP request parsing error.
type ProtocolError struct {
	ErrorString string
}

// Error implements the error interface.
func (err *ProtocolError) Error() string {
	return err.ErrorString
}
| |
// Sentinel protocol errors returned by the request/response machinery.
var (
	ErrHeaderTooLong        = &ProtocolError{"header too long"}
	ErrShortBody            = &ProtocolError{"entity body too short"}
	ErrNotSupported         = &ProtocolError{"feature not supported"}
	ErrUnexpectedTrailer    = &ProtocolError{"trailer header without chunked transfer encoding"}
	ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
	ErrNotMultipart         = &ProtocolError{"request Content-Type isn't multipart/form-data"}
	ErrMissingBoundary      = &ProtocolError{"no multipart boundary param in Content-Type"}
)
| |
// badStringError pairs a description with the offending string for
// error reporting.
type badStringError struct {
	what string
	str  string
}

// Error formats the error as: what "str".
func (e *badStringError) Error() string {
	return fmt.Sprintf("%s %q", e.what, e.str)
}
| |
// Headers that Request.Write handles itself and should be skipped.
// These presumably come from dedicated Request fields instead —
// NOTE(review): confirm against Request.Write, outside this chunk.
var reqWriteExcludeHeader = map[string]bool{
	"Host":              true,
	"User-Agent":        true,
	"Content-Length":    true,
	"Transfer-Encoding": true,
	"Trailer":           true,
}
| |
// A Request represents an HTTP request received by a server
// or to be sent by a client.
//
// The field semantics differ slightly between client and server
// usage. In addition to the notes on the fields below, see the
// documentation for Request.Write and RoundTripper.
type Request struct {
	// Method specifies the HTTP method (GET, POST, PUT, etc.).
	// For client requests an empty string means GET.
	Method string

	// URL specifies either the URI being requested (for server
	// requests) or the URL to access (for client requests).
	//
	// For server requests the URL is parsed from the URI
	// supplied on the Request-Line as stored in RequestURI. For
	// most requests, fields other than Path and RawQuery will be
	// empty. (See RFC 2616, Section 5.1.2)
	//
	// For client requests, the URL's Host specifies the server to
	// connect to, while the Request's Host field optionally
	// specifies the Host header value to send in the HTTP
	// request.
	URL *url.URL

	// The protocol version for incoming requests.
	// Client requests always use HTTP/1.1.
	// See also the ProtoAtLeast method.
	Proto      string // "HTTP/1.0"
	ProtoMajor int    // 1
	ProtoMinor int    // 0

	// A header maps request lines to their values.
	// If the header says
	//
	//	accept-encoding: gzip, deflate
	//	Accept-Language: en-us
	//	Connection: keep-alive
	//
	// then
	//
	//	Header = map[string][]string{
	//		"Accept-Encoding": {"gzip, deflate"},
	//		"Accept-Language": {"en-us"},
	//		"Connection": {"keep-alive"},
	//	}
	//
	// HTTP defines that header names are case-insensitive.
	// The request parser implements this by canonicalizing the
	// name, making the first character and any characters
	// following a hyphen uppercase and the rest lowercase.
	//
	// For client requests certain headers are automatically
	// added and may override values in Header.
	//
	// See the documentation for the Request.Write method.
	Header Header

	// Body is the request's body.
	//
	// For client requests a nil body means the request has no
	// body, such as a GET request. The HTTP Client's Transport
	// is responsible for calling the Close method.
	//
	// For server requests the Request Body is always non-nil
	// but will return EOF immediately when no body is present.
	// The Server will close the request body. The ServeHTTP
	// Handler does not need to.
	Body io.ReadCloser

	// ContentLength records the length of the associated content.
	// The value -1 indicates that the length is unknown.
	// Values >= 0 indicate that the given number of bytes may
	// be read from Body.
	// For client requests, a value of 0 means unknown if Body is not nil.
	ContentLength int64

	// TransferEncoding lists the transfer encodings from outermost to
	// innermost. An empty list denotes the "identity" encoding.
	// TransferEncoding can usually be ignored; chunked encoding is
	// automatically added and removed as necessary when sending and
	// receiving requests.
	TransferEncoding []string

	// Close indicates whether to close the connection after
	// replying to this request (for servers) or after sending
	// the request (for clients).
	Close bool

	// For server requests Host specifies the host on which the
	// URL is sought. Per RFC 2616, this is either the value of
	// the "Host" header or the host name given in the URL itself.
	// It may be of the form "host:port".
	//
	// For client requests Host optionally overrides the Host
	// header to send. If empty, the Request.Write method uses
	// the value of URL.Host.
	Host string

	// Form contains the parsed form data, including both the URL
	// field's query parameters and the POST or PUT form data.
	// This field is only available after ParseForm is called.
	// The HTTP client ignores Form and uses Body instead.
	Form url.Values

	// PostForm contains the parsed form data from POST, PATCH,
	// or PUT body parameters.
	//
	// This field is only available after ParseForm is called.
	// The HTTP client ignores PostForm and uses Body instead.
	PostForm url.Values

	// MultipartForm is the parsed multipart form, including file uploads.
	// This field is only available after ParseMultipartForm is called.
	// The HTTP client ignores MultipartForm and uses Body instead.
	MultipartForm *multipart.Form

	// Trailer specifies additional headers that are sent after the request
	// body.
	//
	// For server requests the Trailer map initially contains only the
	// trailer keys, with nil values. (The client declares which trailers it
	// will later send.) While the handler is reading from Body, it must
	// not reference Trailer. After reading from Body returns EOF, Trailer
	// can be read again and will contain non-nil values, if they were sent
	// by the client.
	//
	// For client requests Trailer must be initialized to a map containing
	// the trailer keys to later send. The values may be nil or their final
	// values. The ContentLength must be 0 or -1, to send a chunked request.
	// After the HTTP request is sent the map values can be updated while
	// the request body is read. Once the body returns EOF, the caller must
	// not mutate Trailer.
	//
	// Few HTTP clients, servers, or proxies support HTTP trailers.
	Trailer Header

	// RemoteAddr allows HTTP servers and other software to record
	// the network address that sent the request, usually for
	// logging. This field is not filled in by ReadRequest and
	// has no defined format. The HTTP server in this package
	// sets RemoteAddr to an "IP:port" address before invoking a
	// handler.
	// This field is ignored by the HTTP client.
	RemoteAddr string

	// RequestURI is the unmodified Request-URI of the
	// Request-Line (RFC 2616, Section 5.1) as sent by the client
	// to a server. Usually the URL field should be used instead.
	// It is an error to set this field in an HTTP client request.
	RequestURI string

	// TLS allows HTTP servers and other software to record
	// information about the TLS connection on which the request
	// was received. This field is not filled in by ReadRequest.
	// The HTTP server in this package sets the field for
	// TLS-enabled connections before invoking a handler;
	// otherwise it leaves the field nil.
	// This field is ignored by the HTTP client.
	TLS *tls.ConnectionState

	// Cancel is an optional channel whose closure indicates that the client
	// request should be regarded as canceled. Not all implementations of
	// RoundTripper may support Cancel.
	//
	// For server requests, this field is not applicable.
	//
	// NOTE(review): later net/http versions deprecate Cancel in favor
	// of contexts — confirm before building new code on this field.
	Cancel <-chan struct{}
}
| |
| // ProtoAtLeast reports whether the HTTP protocol used |
| // in the request is at least major.minor. |
| func (r *Request) ProtoAtLeast(major, minor int) bool { |
| return r.ProtoMajor > major || |
| r.ProtoMajor == major && r.ProtoMinor >= minor |
| } |
| |
// UserAgent returns the client's User-Agent, if sent in the request.
// It returns the empty string when the header is absent.
func (r *Request) UserAgent() string {
	return r.Header.Get("User-Agent")
}
| |
// Cookies parses and returns the HTTP cookies sent with the request.
func (r *Request) Cookies() []*Cookie {
	// An empty filter name selects every cookie in the Cookie header.
	return readCookies(r.Header, "")
}
| |
| // ErrNoCookie is returned by Request's Cookie method when a cookie is not found. |
| var ErrNoCookie = errors.New("http: named cookie not present") |
| |
| // Cookie returns the named cookie provided in the request or |
| // ErrNoCookie if not found. |
| func (r *Request) Cookie(name string) (*Cookie, error) { |
| for _, c := range readCookies(r.Header, name) { |
| return c, nil |
| } |
| return nil, ErrNoCookie |
| } |
| |
| // AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, |
| // AddCookie does not attach more than one Cookie header field. That |
| // means all cookies, if any, are written into the same line, |
| // separated by semicolon. |
| func (r *Request) AddCookie(c *Cookie) { |
| s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) |
| if c := r.Header.Get("Cookie"); c != "" { |
| r.Header.Set("Cookie", c+"; "+s) |
| } else { |
| r.Header.Set("Cookie", s) |
| } |
| } |
| |
// Referer returns the referring URL, if sent in the request.
//
// Referer is misspelled as in the request itself, a mistake from the
// earliest days of HTTP. This value can also be fetched from the
// Header map as Header["Referer"]; the benefit of making it available
// as a method is that the compiler can diagnose programs that use the
// alternate (correct English) spelling req.Referrer() but cannot
// diagnose programs that use Header["Referrer"].
func (r *Request) Referer() string {
	return r.Header.Get("Referer")
}
| |
// multipartByReader is a sentinel value.
// Its presence in Request.MultipartForm indicates that parsing of the request
// body has been handed off to a MultipartReader instead of ParseMultipartForm.
// It is compared by pointer identity, so the maps only need to exist.
var multipartByReader = &multipart.Form{
	Value: make(map[string][]string),
	File:  make(map[string][]*multipart.FileHeader),
}
| |
| // MultipartReader returns a MIME multipart reader if this is a |
| // multipart/form-data POST request, else returns nil and an error. |
| // Use this function instead of ParseMultipartForm to |
| // process the request body as a stream. |
| func (r *Request) MultipartReader() (*multipart.Reader, error) { |
| if r.MultipartForm == multipartByReader { |
| return nil, errors.New("http: MultipartReader called twice") |
| } |
| if r.MultipartForm != nil { |
| return nil, errors.New("http: multipart handled by ParseMultipartForm") |
| } |
| r.MultipartForm = multipartByReader |
| return r.multipartReader() |
| } |
| |
| func (r *Request) multipartReader() (*multipart.Reader, error) { |
| v := r.Header.Get("Content-Type") |
| if v == "" { |
| return nil, ErrNotMultipart |
| } |
| d, params, err := mime.ParseMediaType(v) |
| if err != nil || d != "multipart/form-data" { |
| return nil, ErrNotMultipart |
| } |
| boundary, ok := params["boundary"] |
| if !ok { |
| return nil, ErrMissingBoundary |
| } |
| return multipart.NewReader(r.Body, boundary), nil |
| } |
| |
// valueOrDefault returns value if it is nonempty and def otherwise.
func valueOrDefault(value, def string) string {
	if value == "" {
		return def
	}
	return value
}
| |
// defaultUserAgent is sent when the caller did not set a User-Agent header.
//
// NOTE: This is not intended to reflect the actual Go version being used.
// It was changed at the time of Go 1.1 release because the former User-Agent
// had ended up on a blacklist for some intrusion detection systems.
// See https://codereview.appspot.com/7532043.
const defaultUserAgent = "Go-http-client/1.1"
| |
// Write writes an HTTP/1.1 request, which is the header and body, in wire format.
// This method consults the following fields of the request:
//	Host
//	URL
//	Method (defaults to "GET")
//	Header
//	ContentLength
//	TransferEncoding
//	Body
//
// If Body is present, Content-Length is <= 0 and TransferEncoding
// hasn't been set to "identity", Write adds "Transfer-Encoding:
// chunked" to the header. Body is closed after it is sent.
func (r *Request) Write(w io.Writer) error {
	// false: write the origin-form request line (see WriteProxy for the
	// absolute-URI proxy form); nil: no extra headers.
	return r.write(w, false, nil)
}
| |
// WriteProxy is like Write but writes the request in the form
// expected by an HTTP proxy. In particular, WriteProxy writes the
// initial Request-URI line of the request with an absolute URI, per
// section 5.1.2 of RFC 2616, including the scheme and host.
// In either case, WriteProxy also writes a Host header, using
// either r.Host or r.URL.Host.
func (r *Request) WriteProxy(w io.Writer) error {
	return r.write(w, true, nil)
}
| |
// write serializes the request to w in wire format. usingProxy selects
// the absolute-URI request line used when talking through a proxy.
// extraHeaders may be nil; when non-nil they are written after the
// request's own headers. The write order (request line, Host,
// User-Agent, framing headers, remaining headers, extras, blank line,
// body) is significant and must not be rearranged.
func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {

	// Prefer the explicit Host field; fall back to the URL's host.
	host := cleanHost(req.Host)
	if host == "" {
		if req.URL == nil {
			return errors.New("http: Request.Write on Request with no Host or URL set")
		}
		host = cleanHost(req.URL.Host)
	}

	// IPv6 zone identifiers are link-local and meaningless to the peer.
	host = removeZone(host)

	ruri := req.URL.RequestURI()
	if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" {
		// Proxy form: absolute URI including scheme and host.
		ruri = req.URL.Scheme + "://" + host + ruri
	} else if req.Method == "CONNECT" && req.URL.Path == "" {
		// CONNECT requests normally give just the host:port authority.
		ruri = host
	}

	// Wrap the writer in a bufio Writer if it's not already buffered.
	// Don't always call NewWriter, as that forces a bytes.Buffer
	// and other small bufio Writers to have a minimum 4k buffer
	// size.
	var bw *bufio.Writer
	if _, ok := w.(io.ByteWriter); !ok {
		bw = bufio.NewWriter(w)
		w = bw
	}

	_, err := fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri)
	if err != nil {
		return err
	}

	// Host must be the first header written (it is excluded from the
	// Header map write below via reqWriteExcludeHeader).
	_, err = fmt.Fprintf(w, "Host: %s\r\n", host)
	if err != nil {
		return err
	}

	// Use the defaultUserAgent unless the Header contains one, which
	// may be blank to not send the header.
	userAgent := defaultUserAgent
	if req.Header != nil {
		if ua := req.Header["User-Agent"]; len(ua) > 0 {
			userAgent = ua[0]
		}
	}
	if userAgent != "" {
		_, err = fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent)
		if err != nil {
			return err
		}
	}

	// The transfer writer emits the body-framing headers
	// (Content-Length / Transfer-Encoding / Trailer) and later the body.
	tw, err := newTransferWriter(req)
	if err != nil {
		return err
	}
	err = tw.WriteHeader(w)
	if err != nil {
		return err
	}

	err = req.Header.WriteSubset(w, reqWriteExcludeHeader)
	if err != nil {
		return err
	}

	if extraHeaders != nil {
		err = extraHeaders.Write(w)
		if err != nil {
			return err
		}
	}

	// Blank line terminates the header section.
	_, err = io.WriteString(w, "\r\n")
	if err != nil {
		return err
	}

	// Write body and trailer
	err = tw.WriteBody(w)
	if err != nil {
		return err
	}

	if bw != nil {
		return bw.Flush()
	}
	return nil
}
| |
// cleanHost strips anything after '/' or ' '.
// Ideally we'd clean the Host header according to the spec:
//   https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ]")
//   https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
//   https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
// But practically, what we are trying to avoid is the situation in
// issue 11206, where a malformed Host header used in the proxy context
// would create a bad request. So it is enough to just truncate at the
// first offending character.
func cleanHost(in string) string {
	i := strings.IndexAny(in, " /")
	if i == -1 {
		return in
	}
	return in[:i]
}
| |
// removeZone removes an IPv6 zone identifier from host.
// E.g., "[fe80::1%en0]:8080" becomes "[fe80::1]:8080".
// Hosts without a bracketed IPv6 literal, a closing bracket,
// or a '%' zone separator are returned unchanged.
func removeZone(host string) string {
	if !strings.HasPrefix(host, "[") {
		return host
	}
	end := strings.LastIndex(host, "]")
	if end < 0 {
		return host
	}
	zone := strings.LastIndex(host[:end], "%")
	if zone < 0 {
		return host
	}
	return host[:zone] + host[end:]
}
| |
// ParseHTTPVersion parses an HTTP version string.
// "HTTP/1.0" returns (1, 0, true). Malformed, negative, or
// absurdly large version numbers return (0, 0, false).
func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
	const Big = 1000000 // arbitrary upper bound

	// Fast path for the two versions seen in practice.
	switch vers {
	case "HTTP/1.1":
		return 1, 1, true
	case "HTTP/1.0":
		return 1, 0, true
	}

	const prefix = "HTTP/"
	if !strings.HasPrefix(vers, prefix) {
		return 0, 0, false
	}
	rest := vers[len(prefix):]
	dot := strings.IndexByte(rest, '.')
	if dot < 0 {
		return 0, 0, false
	}
	var err error
	if major, err = strconv.Atoi(rest[:dot]); err != nil || major < 0 || major > Big {
		return 0, 0, false
	}
	if minor, err = strconv.Atoi(rest[dot+1:]); err != nil || minor < 0 || minor > Big {
		return 0, 0, false
	}
	return major, minor, true
}
| |
| // NewRequest returns a new Request given a method, URL, and optional body. |
| // |
| // If the provided body is also an io.Closer, the returned |
| // Request.Body is set to body and will be closed by the Client |
| // methods Do, Post, and PostForm, and Transport.RoundTrip. |
| // |
| // NewRequest returns a Request suitable for use with Client.Do or |
| // Transport.RoundTrip. |
| // To create a request for use with testing a Server Handler use either |
| // ReadRequest or manually update the Request fields. See the Request |
| // type's documentation for the difference between inbound and outbound |
| // request fields. |
| func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { |
| u, err := url.Parse(urlStr) |
| if err != nil { |
| return nil, err |
| } |
| rc, ok := body.(io.ReadCloser) |
| if !ok && body != nil { |
| rc = io.NopCloser(body) |
| } |
| req := &Request{ |
| Method: method, |
| URL: u, |
| Proto: "HTTP/1.1", |
| ProtoMajor: 1, |
| ProtoMinor: 1, |
| Header: make(Header), |
| Body: rc, |
| Host: u.Host, |
| } |
| if body != nil { |
| switch v := body.(type) { |
| case *bytes.Buffer: |
| req.ContentLength = int64(v.Len()) |
| case *bytes.Reader: |
| req.ContentLength = int64(v.Len()) |
| case *strings.Reader: |
| req.ContentLength = int64(v.Len()) |
| } |
| } |
| |
| return req, nil |
| } |
| |
| // BasicAuth returns the username and password provided in the request's |
| // Authorization header, if the request uses HTTP Basic Authentication. |
| // See RFC 2617, Section 2. |
| func (r *Request) BasicAuth() (username, password string, ok bool) { |
| auth := r.Header.Get("Authorization") |
| if auth == "" { |
| return |
| } |
| return parseBasicAuth(auth) |
| } |
| |
// parseBasicAuth parses an HTTP Basic Authentication string.
// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
// Any failure (wrong scheme, bad base64, missing colon) returns ok == false.
func parseBasicAuth(auth string) (username, password string, ok bool) {
	const prefix = "Basic "
	if !strings.HasPrefix(auth, prefix) {
		return "", "", false
	}
	decoded, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
	if err != nil {
		return "", "", false
	}
	creds := string(decoded)
	colon := strings.IndexByte(creds, ':')
	if colon < 0 {
		return "", "", false
	}
	return creds[:colon], creds[colon+1:], true
}
| |
// SetBasicAuth sets the request's Authorization header to use HTTP
// Basic Authentication with the provided username and password.
//
// With HTTP Basic Authentication the provided username and password
// are not encrypted.
func (r *Request) SetBasicAuth(username, password string) {
	r.Header.Set("Authorization", "Basic "+basicAuth(username, password))
}
| |
// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts.
// ok is false when the line does not contain at least two spaces.
func parseRequestLine(line string) (method, requestURI, proto string, ok bool) {
	first := strings.IndexByte(line, ' ')
	if first < 0 {
		return
	}
	second := strings.IndexByte(line[first+1:], ' ')
	if second < 0 {
		return
	}
	second += first + 1
	return line[:first], line[first+1 : second], line[second+1:], true
}
| |
| var textprotoReaderPool sync.Pool |
| |
| func newTextprotoReader(br *bufio.Reader) *textproto.Reader { |
| if v := textprotoReaderPool.Get(); v != nil { |
| tr := v.(*textproto.Reader) |
| tr.R = br |
| return tr |
| } |
| return textproto.NewReader(br) |
| } |
| |
| func putTextprotoReader(r *textproto.Reader) { |
| r.R = nil |
| textprotoReaderPool.Put(r) |
| } |
| |
// ReadRequest reads and parses an incoming request from b.
// It returns the request with Body wired up for the declared framing
// (Content-Length or chunked), or an error describing the first
// malformed element encountered.
func ReadRequest(b *bufio.Reader) (req *Request, err error) {

	tp := newTextprotoReader(b)
	req = new(Request)

	// First line: GET /index.html HTTP/1.0
	var s string
	if s, err = tp.ReadLine(); err != nil {
		// NOTE(review): the defer below is registered only after this
		// read, so a reader failing on the very first line is never
		// returned to the pool — confirm this is intentional.
		return nil, err
	}
	defer func() {
		putTextprotoReader(tp)
		// EOF mid-request is always unexpected at this layer.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	var ok bool
	req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s)
	if !ok {
		return nil, &badStringError{"malformed HTTP request", s}
	}
	rawurl := req.RequestURI
	if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
		return nil, &badStringError{"malformed HTTP version", req.Proto}
	}

	// A CONNECT target is a bare authority ("host:port"), which
	// url.ParseRequestURI rejects. Temporarily prefix a scheme so it
	// parses, then strip the scheme again below.
	justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/")
	if justAuthority {
		rawurl = "http://" + rawurl
	}

	if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
		return nil, err
	}

	if justAuthority {
		// Undo the temporary scheme added above.
		req.URL.Scheme = ""
	}

	// Subsequent lines: Key: value header fields.
	mimeHeader, err := tp.ReadMIMEHeader()
	if err != nil {
		return nil, err
	}
	req.Header = Header(mimeHeader)

	// An absolute URI in the request line wins over any Host header;
	// either way the Host header is removed from the map.
	req.Host = req.URL.Host
	if req.Host == "" {
		req.Host = req.Header.get("Host")
	}
	delete(req.Header, "Host")

	fixPragmaCacheControl(req.Header)

	req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)

	// Determine body framing and attach req.Body.
	err = readTransfer(req, b)
	if err != nil {
		return nil, err
	}

	return req, nil
}
| |
// MaxBytesReader is similar to io.LimitReader but is intended for
// limiting the size of incoming request bodies. In contrast to
// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
// non-EOF error for a Read beyond the limit, and closes the
// underlying reader when its Close method is called.
//
// MaxBytesReader prevents clients from accidentally or maliciously
// sending a large request and wasting server resources.
//
// w is retained so the server can be told to stop reading when the
// limit is exceeded (see maxBytesReader.tooLarge).
func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
	return &maxBytesReader{w: w, r: r, n: n}
}
| |
// maxBytesReader is the io.ReadCloser returned by MaxBytesReader.
type maxBytesReader struct {
	w       ResponseWriter // used to signal the server on overflow
	r       io.ReadCloser  // underlying reader
	n       int64          // max bytes remaining
	stopped bool           // whether the server has been told to stop reading
	sawEOF  bool           // whether the underlying reader returned io.EOF
}
| |
| func (l *maxBytesReader) tooLarge() (n int, err error) { |
| if !l.stopped { |
| l.stopped = true |
| if res, ok := l.w.(*response); ok { |
| res.requestTooLarge() |
| } |
| } |
| return 0, errors.New("http: request body too large") |
| } |
| |
// Read reads from the underlying reader, enforcing the byte limit.
// Reading past the limit returns a non-EOF "request body too large"
// error rather than io.EOF.
func (l *maxBytesReader) Read(p []byte) (n int, err error) {
	toRead := l.n
	if l.n == 0 {
		if l.sawEOF {
			// Budget exhausted and EOF already observed: over the limit.
			return l.tooLarge()
		}
		// The budget is spent but EOF has not been seen yet. Probe with
		// a single byte: data means the body exceeds the limit, EOF
		// means it was exactly l.n bytes.
		toRead = 1
	}
	if int64(len(p)) > toRead {
		p = p[:toRead]
	}
	n, err = l.r.Read(p)
	if err == io.EOF {
		l.sawEOF = true
	}
	if l.n == 0 {
		// Handle the probe read made above.
		if n > 0 {
			return l.tooLarge()
		}
		return 0, err
	}
	l.n -= int64(n)
	if l.n < 0 {
		l.n = 0
	}
	return
}
| |
// Close closes the underlying reader.
func (l *maxBytesReader) Close() error {
	return l.r.Close()
}
| |
| func copyValues(dst, src url.Values) { |
| for k, vs := range src { |
| for _, value := range vs { |
| dst.Add(k, value) |
| } |
| } |
| } |
| |
// parsePostForm parses the request body as form data according to its
// Content-Type. Only application/x-www-form-urlencoded is decoded
// here; multipart/form-data is left to ParseMultipartForm. Bodies not
// already limited by MaxBytesReader are capped at 10MB.
func parsePostForm(r *Request) (vs url.Values, err error) {
	if r.Body == nil {
		err = errors.New("missing form body")
		return
	}
	ct := r.Header.Get("Content-Type")
	// RFC 2616, section 7.2.1 - empty type SHOULD be treated as
	// application/octet-stream.
	if ct == "" {
		ct = "application/octet-stream"
	}
	// A ParseMediaType failure leaves ct empty, so neither case below
	// matches and the error is returned as-is.
	ct, _, err = mime.ParseMediaType(ct)
	switch {
	case ct == "application/x-www-form-urlencoded":
		var reader io.Reader = r.Body
		maxFormSize := int64(1<<63 - 1)
		if _, ok := r.Body.(*maxBytesReader); !ok {
			// Not already limited; cap at 10MB, reading one extra byte
			// so an over-long body is detectable below.
			maxFormSize = int64(10 << 20)
			reader = io.LimitReader(r.Body, maxFormSize+1)
		}
		b, e := io.ReadAll(reader)
		if e != nil {
			// Keep the earlier ParseMediaType error when there was one.
			if err == nil {
				err = e
			}
			break
		}
		if int64(len(b)) > maxFormSize {
			err = errors.New("http: POST too large")
			return
		}
		vs, e = url.ParseQuery(string(b))
		if err == nil {
			err = e
		}
	case ct == "multipart/form-data":
		// handled by ParseMultipartForm (which is calling us, or should be)
	}
	return
}
| |
// ParseForm parses the raw query from the URL and updates r.Form.
//
// For POST or PUT requests, it also parses the request body as a form and
// put the results into both r.PostForm and r.Form.
// POST and PUT body parameters take precedence over URL query string values
// in r.Form.
//
// If the request Body's size has not already been limited by MaxBytesReader,
// the size is capped at 10MB.
//
// ParseMultipartForm calls ParseForm automatically.
// It is idempotent.
func (r *Request) ParseForm() error {
	var err error
	if r.PostForm == nil {
		// Body parameters are parsed once, for body-carrying methods only.
		if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" {
			r.PostForm, err = parsePostForm(r)
		}
		if r.PostForm == nil {
			r.PostForm = make(url.Values)
		}
	}
	if r.Form == nil {
		// Seed r.Form with body parameters first so they precede
		// (take precedence over) URL query values.
		if len(r.PostForm) > 0 {
			r.Form = make(url.Values)
			copyValues(r.Form, r.PostForm)
		}
		var newValues url.Values
		if r.URL != nil {
			var e error
			newValues, e = url.ParseQuery(r.URL.RawQuery)
			// Report the first error only.
			if err == nil {
				err = e
			}
		}
		if newValues == nil {
			newValues = make(url.Values)
		}
		if r.Form == nil {
			r.Form = newValues
		} else {
			copyValues(r.Form, newValues)
		}
	}
	return err
}
| |
// ParseMultipartForm parses a request body as multipart/form-data.
// The whole request body is parsed and up to a total of maxMemory bytes of
// its file parts are stored in memory, with the remainder stored on
// disk in temporary files.
// ParseMultipartForm calls ParseForm if necessary.
// After one call to ParseMultipartForm, subsequent calls have no effect.
func (r *Request) ParseMultipartForm(maxMemory int64) error {
	if r.MultipartForm == multipartByReader {
		return errors.New("http: multipart handled by MultipartReader")
	}
	if r.Form == nil {
		err := r.ParseForm()
		if err != nil {
			return err
		}
	}
	// Already parsed: idempotent no-op.
	if r.MultipartForm != nil {
		return nil
	}

	mr, err := r.multipartReader()
	if err != nil {
		return err
	}

	f, err := mr.ReadForm(maxMemory)
	if err != nil {
		return err
	}
	// NOTE(review): multipart values are merged into r.Form but not
	// into r.PostForm — confirm whether PostForm should include them.
	for k, v := range f.Value {
		r.Form[k] = append(r.Form[k], v...)
	}
	r.MultipartForm = f

	return nil
}
| |
| // FormValue returns the first value for the named component of the query. |
| // POST and PUT body parameters take precedence over URL query string values. |
| // FormValue calls ParseMultipartForm and ParseForm if necessary and ignores |
| // any errors returned by these functions. |
| // If key is not present, FormValue returns the empty string. |
| // To access multiple values of the same key, call ParseForm and |
| // then inspect Request.Form directly. |
| func (r *Request) FormValue(key string) string { |
| if r.Form == nil { |
| r.ParseMultipartForm(defaultMaxMemory) |
| } |
| if vs := r.Form[key]; len(vs) > 0 { |
| return vs[0] |
| } |
| return "" |
| } |
| |
| // PostFormValue returns the first value for the named component of the POST |
| // or PUT request body. URL query parameters are ignored. |
| // PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores |
| // any errors returned by these functions. |
| // If key is not present, PostFormValue returns the empty string. |
| func (r *Request) PostFormValue(key string) string { |
| if r.PostForm == nil { |
| r.ParseMultipartForm(defaultMaxMemory) |
| } |
| if vs := r.PostForm[key]; len(vs) > 0 { |
| return vs[0] |
| } |
| return "" |
| } |
| |
| // FormFile returns the first file for the provided form key. |
| // FormFile calls ParseMultipartForm and ParseForm if necessary. |
| func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) { |
| if r.MultipartForm == multipartByReader { |
| return nil, nil, errors.New("http: multipart handled by MultipartReader") |
| } |
| if r.MultipartForm == nil { |
| err := r.ParseMultipartForm(defaultMaxMemory) |
| if err != nil { |
| return nil, nil, err |
| } |
| } |
| if r.MultipartForm != nil && r.MultipartForm.File != nil { |
| if fhs := r.MultipartForm.File[key]; len(fhs) > 0 { |
| f, err := fhs[0].Open() |
| return f, fhs[0], err |
| } |
| } |
| return nil, nil, ErrMissingFile |
| } |
| |
// expectsContinue reports whether the request carries an
// "Expect: 100-continue" header.
func (r *Request) expectsContinue() bool {
	return hasToken(r.Header.get("Expect"), "100-continue")
}
| |
| func (r *Request) wantsHttp10KeepAlive() bool { |
| if r.ProtoMajor != 1 || r.ProtoMinor != 0 { |
| return false |
| } |
| return hasToken(r.Header.get("Connection"), "keep-alive") |
| } |
| |
// wantsClose reports whether the request asks for the connection to be
// closed via "Connection: close".
func (r *Request) wantsClose() bool {
	return hasToken(r.Header.get("Connection"), "close")
}
| |
// closeBody closes the request body if one is present.
func (r *Request) closeBody() {
	if r.Body != nil {
		r.Body.Close()
	}
}
| |
// respExcludeHeader lists headers that Response.Write emits itself via
// the transfer writer, and which must therefore be excluded when the
// Header map is written out to avoid duplicates.
var respExcludeHeader = map[string]bool{
	"Content-Length":    true,
	"Transfer-Encoding": true,
	"Trailer":           true,
}
| |
// Response represents the response from an HTTP request.
type Response struct {
	Status     string // e.g. "200 OK"
	StatusCode int    // e.g. 200
	Proto      string // e.g. "HTTP/1.0"
	ProtoMajor int    // e.g. 1
	ProtoMinor int    // e.g. 0

	// Header maps header keys to values. If the response had multiple
	// headers with the same key, they may be concatenated, with comma
	// delimiters. (Section 4.2 of RFC 2616 requires that multiple headers
	// be semantically equivalent to a comma-delimited sequence.) Values
	// duplicated by other fields in this struct (e.g., ContentLength) are
	// omitted from Header.
	//
	// Keys in the map are canonicalized (see CanonicalHeaderKey).
	Header Header

	// Body represents the response body.
	//
	// The http Client and Transport guarantee that Body is always
	// non-nil, even on responses without a body or responses with
	// a zero-length body. It is the caller's responsibility to
	// close Body. The default HTTP client's Transport does not
	// attempt to reuse HTTP/1.0 or HTTP/1.1 TCP connections
	// ("keep-alive") unless the Body is read to completion and is
	// closed.
	//
	// The Body is automatically dechunked if the server replied
	// with a "chunked" Transfer-Encoding.
	Body io.ReadCloser

	// ContentLength records the length of the associated content. The
	// value -1 indicates that the length is unknown. Unless Request.Method
	// is "HEAD", values >= 0 indicate that the given number of bytes may
	// be read from Body.
	ContentLength int64

	// TransferEncoding contains the transfer encodings from outer-most
	// to inner-most. A nil value means the "identity" encoding is used.
	TransferEncoding []string

	// Close records whether the header directed that the connection be
	// closed after reading Body. The value is advice for clients: neither
	// ReadResponse nor Response.Write ever closes a connection.
	Close bool

	// Trailer maps trailer keys to values, in the same
	// format as the header.
	Trailer Header

	// The Request that was sent to obtain this Response.
	// Request's Body is nil (having already been consumed).
	// This is only populated for Client requests.
	Request *Request

	// TLS contains information about the TLS connection on which the
	// response was received. It is nil for unencrypted responses.
	// The pointer is shared between responses and should not be
	// modified.
	TLS *tls.ConnectionState
}
| |
// Cookies parses and returns the cookies set in the Set-Cookie headers.
func (r *Response) Cookies() []*Cookie {
	return readSetCookies(r.Header)
}
| |
| // ErrNoLocation is returned by Response's Location method |
| // when no Location header is present. |
| var ErrNoLocation = errors.New("http: no Location header in response") |
| |
| // Location returns the URL of the response's "Location" header, |
| // if present. Relative redirects are resolved relative to |
| // the Response's Request. ErrNoLocation is returned if no |
| // Location header is present. |
| func (r *Response) Location() (*url.URL, error) { |
| lv := r.Header.Get("Location") |
| if lv == "" { |
| return nil, ErrNoLocation |
| } |
| if r.Request != nil && r.Request.URL != nil { |
| return r.Request.URL.Parse(lv) |
| } |
| return url.Parse(lv) |
| } |
| |
// ReadResponse reads and returns an HTTP response from r.
// The req parameter optionally specifies the Request that corresponds
// to this Response. If nil, a GET request is assumed.
// Clients must call resp.Body.Close when finished reading resp.Body.
// After that call, clients can inspect resp.Trailer to find key/value
// pairs included in the response trailer.
func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) {
	tp := textproto.NewReader(r)
	resp := &Response{
		Request: req,
	}

	// Parse the first line of the response: "HTTP/1.1 200 OK".
	line, err := tp.ReadLine()
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	// At most three fields: proto, status code, optional reason phrase
	// (which may itself contain spaces).
	f := strings.SplitN(line, " ", 3)
	if len(f) < 2 {
		return nil, &badStringError{"malformed HTTP response", line}
	}
	reasonPhrase := ""
	if len(f) > 2 {
		reasonPhrase = f[2]
	}
	resp.Status = f[1] + " " + reasonPhrase
	resp.StatusCode, err = strconv.Atoi(f[1])
	if err != nil {
		return nil, &badStringError{"malformed HTTP status code", f[1]}
	}

	resp.Proto = f[0]
	var ok bool
	if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
		return nil, &badStringError{"malformed HTTP version", resp.Proto}
	}

	// Parse the response headers.
	mimeHeader, err := tp.ReadMIMEHeader()
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	resp.Header = Header(mimeHeader)

	fixPragmaCacheControl(resp.Header)

	// Determine body framing and attach resp.Body.
	err = readTransfer(resp, r)
	if err != nil {
		return nil, err
	}

	return resp, nil
}
| |
| // RFC2616: Should treat |
| // Pragma: no-cache |
| // like |
| // Cache-Control: no-cache |
| func fixPragmaCacheControl(header Header) { |
| if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { |
| if _, presentcc := header["Cache-Control"]; !presentcc { |
| header["Cache-Control"] = []string{"no-cache"} |
| } |
| } |
| } |
| |
| // ProtoAtLeast reports whether the HTTP protocol used |
| // in the response is at least major.minor. |
| func (r *Response) ProtoAtLeast(major, minor int) bool { |
| return r.ProtoMajor > major || |
| r.ProtoMajor == major && r.ProtoMinor >= minor |
| } |
| |
// Write writes r to w in the HTTP/1.n server response format,
// including the status line, headers, body, and optional trailer.
//
// This method consults the following fields of the response r:
//
//	StatusCode
//	ProtoMajor
//	ProtoMinor
//	Request.Method
//	TransferEncoding
//	Trailer
//	Body
//	ContentLength
//	Header, values for non-canonical keys will have unpredictable behavior
//
// The Response Body is closed after it is sent.
func (r *Response) Write(w io.Writer) error {

	// Status line: fall back to the standard reason phrase, then to a
	// generic "status code N" text, when Status is unset.
	text := r.Status
	if text == "" {
		var ok bool
		text, ok = statusText[r.StatusCode]
		if !ok {
			text = "status code " + strconv.Itoa(r.StatusCode)
		}
	}
	protoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor)
	statusCode := strconv.Itoa(r.StatusCode) + " "
	// Avoid duplicating the code when Status already starts with it
	// (e.g. "200 OK").
	text = strings.TrimPrefix(text, statusCode)
	if _, err := io.WriteString(w, "HTTP/"+protoMajor+"."+protoMinor+" "+statusCode+text+"\r\n"); err != nil {
		return err
	}

	// Clone the response so framing adjustments below don't mutate r.
	r1 := new(Response)
	*r1 = *r
	if r1.ContentLength == 0 && r1.Body != nil {
		// Is it actually 0 length? Or just unknown?
		// Probe with a one-byte read to distinguish the two.
		var buf [1]byte
		n, err := r1.Body.Read(buf[:])
		if err != nil && err != io.EOF {
			return err
		}
		if n == 0 {
			// Reset it to a known zero reader, in case underlying one
			// is unhappy being read repeatedly.
			r1.Body = eofReader
		} else {
			// Unknown length: mark it -1 and stitch the probed byte
			// back onto the front of the body.
			r1.ContentLength = -1
			r1.Body = struct {
				io.Reader
				io.Closer
			}{
				io.MultiReader(bytes.NewReader(buf[:1]), r.Body),
				r.Body,
			}
		}
	}
	// If we're sending a non-chunked HTTP/1.1 response without a
	// content-length, the only way to do that is the old HTTP/1.0
	// way, by noting the EOF with a connection close, so we need
	// to set Close.
	if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) {
		r1.Close = true
	}

	// Process Body, ContentLength, Close, Trailer via the transfer writer.
	tw, err := newTransferWriter(r1)
	if err != nil {
		return err
	}
	err = tw.WriteHeader(w)
	if err != nil {
		return err
	}

	// Rest of header, excluding the framing keys the transfer writer owns.
	err = r.Header.WriteSubset(w, respExcludeHeader)
	if err != nil {
		return err
	}

	// contentLengthAlreadySent may have been already sent for
	// POST/PUT requests, even if zero length. See Issue 8180.
	contentLengthAlreadySent := tw.shouldSendContentLength()
	if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent {
		if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil {
			return err
		}
	}

	// End-of-header
	if _, err := io.WriteString(w, "\r\n"); err != nil {
		return err
	}

	// Write body and trailer
	err = tw.WriteBody(w)
	if err != nil {
		return err
	}

	// Success
	return nil
}
| |
// Errors introduced by the HTTP server.
var (
	// ErrWriteAfterFlush means a handler wrote after the response was flushed.
	ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
	// ErrBodyNotAllowed means the method or status code forbids a body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
	// ErrHijacked means the underlying connection was taken over via Hijacker.
	ErrHijacked = errors.New("Conn has been hijacked")
	// ErrContentLength means more bytes were written than Content-Length declared.
	ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length")
)
| |
// Objects implementing the Handler interface can be
// registered to serve a particular path or subtree
// in the HTTP server.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished
// and that the HTTP server can move on to the next request on
// the connection.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and hangs up the connection.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
| |
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. Changing the header after a call to
	// WriteHeader (or Write) has no effect unless the modified
	// headers were declared as trailers by setting the
	// "Trailer" header before the call to WriteHeader (see example).
	// To suppress implicit response headers, set their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	// If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
	// before writing the data. If the Header does not contain a
	// Content-Type line, Write adds a Content-Type set to the result of passing
	// the initial 512 bytes of written data to DetectContentType.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with status code.
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	WriteHeader(int)
}
| |
// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}
| |
// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack(), the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}
| |
// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives a single value
	// when the client connection has gone away.
	CloseNotify() <-chan bool
}
| |
// A conn represents the server side of an HTTP connection.
//
// Reads flow through the stack buf -> lr -> sr -> rwc (see the field
// comments); writes go through buf -> w (a checkConnErrorWriter) -> rwc.
type conn struct {
	remoteAddr string               // network address of remote side
	server     *Server              // the Server on which the connection arrived
	rwc        net.Conn             // i/o connection
	w          io.Writer            // checkConnErrorWriter's copy of rwc, not zeroed on Hijack
	werr       error                // any errors writing to w
	sr         liveSwitchReader     // where the LimitReader reads from; usually the rwc
	lr         *io.LimitedReader    // io.LimitReader(sr); caps header read size per request
	buf        *bufio.ReadWriter    // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc
	tlsState   *tls.ConnectionState // or nil when not using TLS
	lastMethod string               // method of previous request, or ""

	mu           sync.Mutex // guards the following
	clientGone   bool       // if client has disconnected mid-request
	closeNotifyc chan bool  // made lazily by closeNotify
	hijackedv    bool       // connection has been hijacked by handler
}
| |
| func (c *conn) hijacked() bool { |
| c.mu.Lock() |
| defer c.mu.Unlock() |
| return c.hijackedv |
| } |
| |
// hijack implements the Hijacker contract for this connection: it hands
// the raw net.Conn and its buffered ReadWriter to the caller and detaches
// them from the conn so the server stops using them.
func (c *conn) hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// A connection may only be hijacked once.
	if c.hijackedv {
		return nil, nil, ErrHijacked
	}
	// closeNotify splices a pipe into the read path; once that has
	// happened the raw connection can no longer be handed over intact.
	if c.closeNotifyc != nil {
		return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier")
	}
	c.hijackedv = true
	rwc = c.rwc
	buf = c.buf
	// Detach so later server-side writes/closes can't touch the
	// now-caller-owned connection.
	c.rwc = nil
	c.buf = nil
	c.setState(rwc, StateHijacked)
	return
}
| |
// closeNotify lazily creates the CloseNotify channel. On first call it
// splices an io.Pipe into the connection's read path so a background
// goroutine can observe when the client side goes away.
func (c *conn) closeNotify() <-chan bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closeNotifyc == nil {
		// Buffered so noteClientGone's single send never blocks.
		c.closeNotifyc = make(chan bool, 1)
		if c.hijackedv {
			// Connection already handed to a handler; nothing to watch.
			return c.closeNotifyc
		}
		pr, pw := io.Pipe()

		// Swap the conn's read source for the pipe's read end; the
		// goroutine below pumps the original source into the pipe so
		// normal request reads still work.
		readSource := c.sr.r
		c.sr.Lock()
		c.sr.r = pr
		c.sr.Unlock()
		go func() {
			_, err := io.Copy(pw, readSource)
			if err == nil {
				err = io.EOF
			}
			// Propagate the error/EOF to in-flight reads, then signal.
			pw.CloseWithError(err)
			c.noteClientGone()
		}()
	}
	return c.closeNotifyc
}
| |
| func (c *conn) noteClientGone() { |
| c.mu.Lock() |
| defer c.mu.Unlock() |
| if c.closeNotifyc != nil && !c.clientGone { |
| c.closeNotifyc <- true |
| } |
| c.clientGone = true |
| } |
| |
// A switchWriter can have its Writer changed at runtime.
// It's not safe for concurrent Writes and switches.
type switchWriter struct {
	io.Writer // current destination; replace by assigning the field
}
| |
// A liveSwitchReader can have its Reader changed at runtime. It's
// safe for concurrent reads and switches, if its mutex is held
// while swapping r.
type liveSwitchReader struct {
	sync.Mutex
	r io.Reader // current source; guarded by the embedded Mutex
}
| |
| func (sr *liveSwitchReader) Read(p []byte) (n int, err error) { |
| sr.Lock() |
| r := sr.r |
| sr.Unlock() |
| return r.Read(p) |
| } |
| |
// bufferBeforeChunkingSize is how much handler output is buffered before
// committing to chunked encoding. It should be >= 512 bytes so
// DetectContentType has enough data to sniff, but is otherwise
// somewhat arbitrary.
const bufferBeforeChunkingSize = 2048
| |
// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
	res *response // the response this writer serves

	// header is either nil or a deep clone of res.handlerHeader
	// at the time of res.WriteHeader, if res.WriteHeader is
	// called and extra buffering is being done to calculate
	// Content-Type and/or Content-Length.
	header Header

	// wroteHeader tells whether the header's been written to "the
	// wire" (or rather: w.conn.buf). this is unlike
	// (*response).wroteHeader, which tells only whether it was
	// logically written.
	wroteHeader bool

	// set by the writeHeader method:
	chunking bool // using chunked transfer encoding for reply body
}
| |
// Preallocated byte slices for the wire-format separators, to avoid
// re-allocating them on every header write.
var (
	crlf       = []byte("\r\n")
	colonSpace = []byte(": ")
)
| |
// Write sends p to the connection buffer, lazily finalizing the response
// header first and framing p as a chunk when in chunking mode. Any write
// error closes the underlying connection immediately.
func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		// First body write: finalize and emit the header, sniffing
		// Content-Type/Length from p as needed.
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// HEAD responses carry headers only; pretend the body was written.
		return len(p), nil
	}
	if cw.chunking {
		// Chunk size line: hex length + CRLF.
		_, err = fmt.Fprintf(cw.res.conn.buf, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.buf.Write(p)
	if cw.chunking && err == nil {
		// Chunk data is terminated by CRLF.
		_, err = cw.res.conn.buf.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}
| |
// flush forces the header out (if not already written) and flushes the
// connection's buffered writer.
func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.buf.Flush()
}
| |
// close finishes the response body. In chunking mode it writes the
// zero-length terminating chunk, any declared trailers, and the final
// blank line required by chunked encoding.
func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.buf
		// Zero-length chunk marks end of body.
		bw.WriteString("0\r\n")
		if len(cw.res.trailers) > 0 {
			// Emit only the trailers that the handler actually set
			// in handlerHeader after declaring them.
			trailers := make(Header)
			for _, h := range cw.res.trailers {
				if vv := cw.res.handlerHeader[h]; len(vv) > 0 {
					trailers[h] = vv
				}
			}
			trailers.Write(bw)
		}
		// Final CRLF terminates the chunked message (after trailers).
		bw.WriteString("\r\n")
	}
}
| |
// A response represents the server side of an HTTP response.
type response struct {
	conn          *conn
	req           *Request // request for this response
	wroteHeader   bool     // reply header has been (logically) written
	wroteContinue bool     // 100 Continue response was written

	w  *bufio.Writer // buffers output in chunks to chunkWriter
	cw chunkWriter
	sw *switchWriter // of the bufio.Writer, for return to putBufioWriter

	// handlerHeader is the Header that Handlers get access to,
	// which may be retained and mutated even after WriteHeader.
	// handlerHeader is copied into cw.header at WriteHeader
	// time, and privately mutated thereafter.
	handlerHeader Header
	calledHeader  bool // handler accessed handlerHeader via Header

	written       int64 // number of bytes written in body
	contentLength int64 // explicitly-declared Content-Length; or -1
	status        int   // status code passed to WriteHeader

	// close connection after this reply. set on request and
	// updated after response from handler if there's a
	// "Connection: keep-alive" response header and a
	// Content-Length.
	closeAfterReply bool

	// requestBodyLimitHit is set by requestTooLarge when
	// maxBytesReader hits its max size. It is checked in
	// WriteHeader, to make sure we don't consume the
	// remaining request body to try to advance to the next HTTP
	// request. Instead, when this is set, we stop reading
	// subsequent requests on this connection and stop reading
	// input from it.
	requestBodyLimitHit bool

	// trailers are the headers to be sent after the handler
	// finishes writing the body. This field is initialized from
	// the Trailer response header when the response header is
	// written.
	trailers []string

	handlerDone bool // set true when the handler exits

	// Buffers for Date and Content-Length, reused across the response
	// to avoid per-response allocations in writeHeader.
	dateBuf [len(TimeFormat)]byte
	clenBuf [10]byte
}
| |
| // declareTrailer is called for each Trailer header when the |
| // response header is written. It notes that a header will need to be |
| // written in the trailers at the end of the response. |
| func (w *response) declareTrailer(k string) { |
| k = CanonicalHeaderKey(k) |
| switch k { |
| case "Transfer-Encoding", "Content-Length", "Trailer": |
| |
| return |
| } |
| w.trailers = append(w.trailers, k) |
| } |
| |
| // requestTooLarge is called by maxBytesReader when too much input has |
| // been read from the client. |
| func (w *response) requestTooLarge() { |
| w.closeAfterReply = true |
| w.requestBodyLimitHit = true |
| if !w.wroteHeader { |
| w.Header().Set("Connection", "close") |
| } |
| } |
| |
| // needsSniff reports whether a Content-Type still needs to be sniffed. |
| func (w *response) needsSniff() bool { |
| _, haveType := w.handlerHeader["Content-Type"] |
| return !w.cw.wroteHeader && !haveType && w.written < sniffLen |
| } |
| |
// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy, forcing io.Copy down its generic path.
type writerOnly struct {
	io.Writer
}
| |
| func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { |
| switch v := src.(type) { |
| case *os.File: |
| fi, err := v.Stat() |
| if err != nil { |
| return false, err |
| } |
| return fi.Mode().IsRegular(), nil |
| case *io.LimitedReader: |
| return srcIsRegularFile(v.R) |
| default: |
| return |
| } |
| } |
| |
// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	// The fast path needs both a ReaderFrom destination (e.g. TCP conn)
	// and a regular-file source; otherwise fall back to a plain copy
	// through the normal response write path.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	regFile, err := srcIsRegularFile(src)
	if err != nil {
		return 0, err
	}
	if !ok || !regFile {
		return io.Copy(writerOnly{w}, src)
	}

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	if w.needsSniff() {
		// Route the first sniffLen bytes through the normal path so
		// Content-Type detection still sees them.
		n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
		n += n0
		if err != nil {
			return n, err
		}
	}

	// Flush the buffered layers so the header and any sniffed prefix are
	// on the wire before handing the raw connection the rest.
	w.w.Flush()
	w.cw.flush()

	if !w.cw.chunking && w.bodyAllowed() {
		// sendfile path: write the remainder directly to the connection.
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}

	// Chunked (or body-less) responses must keep going through the
	// chunkWriter for correct framing.
	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}
| |
// noLimit is an effective infinite upper bound for io.LimitedReader
// (the maximum int64 value).
const noLimit int64 = (1 << 63) - 1
| |
// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper (see newConn).
const debugServerConnections = false
| |
// Create new connection from rwc, assembling the layered read/write
// stack: bufio -> LimitedReader -> liveSwitchReader -> rwc for reads,
// and bufio -> checkConnErrorWriter -> rwc for writes.
func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
	c = new(conn)
	c.remoteAddr = rwc.RemoteAddr().String()
	c.server = srv
	c.rwc = rwc
	// c.w keeps the raw conn even if c.rwc is later wrapped or hijacked.
	c.w = rwc
	if debugServerConnections {
		c.rwc = newLoggingConn("server", c.rwc)
	}
	c.sr.r = c.rwc
	// The limit starts effectively infinite; readRequest tightens it
	// per-request to bound header size.
	c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader)
	br := newBufioReader(c.lr)
	bw := newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
	c.buf = bufio.NewReadWriter(br, bw)
	return c, nil
}
| |
// Pools of bufio readers/writers, reused across connections to reduce
// allocation churn. Writers are pooled per size (2KB and 4KB).
var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)
| |
| func bufioWriterPool(size int) *sync.Pool { |
| switch size { |
| case 2 << 10: |
| return &bufioWriter2kPool |
| case 4 << 10: |
| return &bufioWriter4kPool |
| } |
| return nil |
| } |
| |
| func newBufioReader(r io.Reader) *bufio.Reader { |
| if v := bufioReaderPool.Get(); v != nil { |
| br := v.(*bufio.Reader) |
| br.Reset(r) |
| return br |
| } |
| |
| return bufio.NewReader(r) |
| } |
| |
// putBufioReader returns br to the pool, first resetting it so the pool
// doesn't pin the old underlying reader.
func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}
| |
| func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { |
| pool := bufioWriterPool(size) |
| if pool != nil { |
| if v := pool.Get(); v != nil { |
| bw := v.(*bufio.Writer) |
| bw.Reset(w) |
| return bw |
| } |
| } |
| return bufio.NewWriterSize(w, size) |
| } |
| |
| func putBufioWriter(bw *bufio.Writer) { |
| bw.Reset(nil) |
| if pool := bufioWriterPool(bw.Available()); pool != nil { |
| pool.Put(bw) |
| } |
| } |
| |
// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
| |
| func (srv *Server) maxHeaderBytes() int { |
| if srv.MaxHeaderBytes > 0 { |
| return srv.MaxHeaderBytes |
| } |
| return DefaultMaxHeaderBytes |
| } |
| |
// initialLimitedReaderSize is the per-request cap on bytes read while
// parsing the request line and headers: the header limit plus slack
// for the request line itself.
func (srv *Server) initialLimitedReaderSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096
}
| |
// expectContinueReader is a wrapper around io.ReadCloser which on first
// read sends an HTTP/1.1 100 Continue header to the client.
type expectContinueReader struct {
	resp       *response     // response whose connection receives the 100 Continue
	readCloser io.ReadCloser // the real request body
	closed     bool          // Close was called; further reads fail
	sawEOF     bool          // underlying body returned io.EOF
}
| |
// Read writes "100 Continue" to the client before the first body read
// (unless the connection was hijacked), then delegates to the wrapped
// body, tracking EOF.
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		// The interim response must be flushed before the client will
		// start sending the body.
		ecr.resp.wroteContinue = true
		ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.buf.Flush()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF = true
	}
	return
}
| |
// Close marks the reader closed (subsequent Reads return
// ErrBodyReadAfterClose) and closes the wrapped body.
func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}
| |
// TimeFormat is the time format to use with
// time.Parse and time.Time.Format when parsing
// or generating times in HTTP headers.
// It is like time.RFC1123 but hard codes GMT as the time zone.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
| |
| // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) |
| func appendTime(b []byte, t time.Time) []byte { |
| const days = "SunMonTueWedThuFriSat" |
| const months = "JanFebMarAprMayJunJulAugSepOctNovDec" |
| |
| t = t.UTC() |
| yy, mm, dd := t.Date() |
| hh, mn, ss := t.Clock() |
| day := days[3*t.Weekday():] |
| mon := months[3*(mm-1):] |
| |
| return append(b, |
| day[0], day[1], day[2], ',', ' ', |
| byte('0'+dd/10), byte('0'+dd%10), ' ', |
| mon[0], mon[1], mon[2], ' ', |
| byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', |
| byte('0'+hh/10), byte('0'+hh%10), ':', |
| byte('0'+mn/10), byte('0'+mn%10), ':', |
| byte('0'+ss/10), byte('0'+ss%10), ' ', |
| 'G', 'M', 'T') |
| } |
| |
// errTooLarge is returned by readRequest when the request line/headers
// exceed the server's configured size limit.
var errTooLarge = errors.New("http: request too large")
| |
// Read next request from connection. It applies the server's deadlines,
// bounds the header read, parses the request, and returns a response
// object wired to this connection.
func (c *conn) readRequest() (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	if d := c.server.ReadTimeout; d != 0 {
		c.rwc.SetReadDeadline(time.Now().Add(d))
	}
	if d := c.server.WriteTimeout; d != 0 {
		// Set the write deadline only after the request has been read,
		// so slow request reads don't eat into write time.
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	// Cap how much may be read while parsing request line + headers.
	c.lr.N = c.server.initialLimitedReaderSize()
	if c.lastMethod == "POST" {
		// Tolerate clients (observed in the wild) that send extra CRLF
		// after a POST body; skip them before parsing the next request.
		peek, _ := c.buf.Reader.Peek(4)
		c.buf.Reader.Discard(numLeadingCRorLF(peek))
	}
	var req *Request
	if req, err = ReadRequest(c.buf.Reader); err != nil {
		// Limit exhausted means the headers were too big, which we
		// report distinctly from a malformed request.
		if c.lr.N == 0 {
			return nil, errTooLarge
		}
		return nil, err
	}
	// Headers parsed; lift the limit for the body.
	c.lr.N = noLimit
	c.lastMethod = req.Method

	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	w = &response{
		conn:          c,
		req:           req,
		handlerHeader: make(Header),
		contentLength: -1, // unknown until the handler declares or finishes
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}
| |
// Header returns the header map the handler may mutate. If the header
// has been logically written (WriteHeader called) but not yet sent on
// the wire, it snapshots a private clone first so later handler
// mutations can't change what gets sent.
func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logical and physical write:
		// clone now to freeze the outgoing headers.
		w.cw.header = w.handlerHeader.clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}
| |
// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a
// "Connection: close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10
| |
// WriteHeader logically records the response status code. The header is
// not sent on the wire here; chunkWriter.writeHeader does that later.
// Duplicate calls and calls after Hijack are logged and ignored.
func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		w.conn.server.logf("http: response.WriteHeader on hijacked connection")
		return
	}
	if w.wroteHeader {
		w.conn.server.logf("http: multiple response.WriteHeader calls")
		return
	}
	w.wroteHeader = true
	w.status = code

	// If the handler has a reference to the header map, freeze a copy
	// now so post-WriteHeader mutations don't alter what's sent.
	if w.calledHeader && w.cw.header == nil {
		w.cw.header = w.handlerHeader.clone()
	}

	// Honor a handler-declared Content-Length, dropping invalid values.
	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}
| |
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}
| |
// extraHeaderKeys holds the wire-format names for the string fields of
// extraHeader, in the same order extraHeader.Write's loop iterates them.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}
| |
// Preallocated header-name prefixes for the []byte fields of extraHeader.
var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)
| |
| // Write writes the headers described in h to w. |
| // |
| // This method has a value receiver, despite the somewhat large size |
| // of h, because it prevents an allocation. The escape analysis isn't |
| // smart enough to realize this function doesn't mutate h. |
| func (h extraHeader) Write(w *bufio.Writer) { |
| if h.date != nil { |
| w.Write(headerDate) |
| w.Write(h.date) |
| w.Write(crlf) |
| } |
| if h.contentLength != nil { |
| w.Write(headerContentLength) |
| w.Write(h.contentLength) |
| w.Write(crlf) |
| } |
| for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { |
| if v != "" { |
| w.Write(extraHeaderKeys[i]) |
| w.Write(colonSpace) |
| w.WriteString(v) |
| w.Write(crlf) |
| } |
| } |
| } |
| |
// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.buf.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is either cw's private snapshot (owned, mutable) or the
	// handler's live map (not owned; deletions must go through
	// excludeHeader instead of mutating it).
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Record any declared trailers.
	trailers := false
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done and the whole body fits in p, we know the
	// exact Content-Length and can avoid chunking.
	if w.handlerDone && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// An HTTP/1.0 client asking for keep-alive gets it only when a
	// Content-Length was sent (it has no chunked encoding to rely on).
	if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	hasCL := w.contentLength != -1

	if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// The client sent Expect: 100-continue but never finished sending
	// the body; don't try to reuse this connection.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
		w.closeAfterReply = true
	}

	// To keep the connection alive we must drain any unread request
	// body -- but only up to maxPostHandlerReadBytes.
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed early without reaching EOF;
					// the connection state is unknown.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			_, err := io.CopyN(io.Discard, w.req.Body, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// Read the limit plus one byte: more remains, too big.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body already closed by the handler; nothing to do.
			case io.EOF:
				// Body fully consumed; close it to release resources.
				err = w.req.Body.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other read error; give up on reuse.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// Sniff a Content-Type from p when the handler set none.
		_, haveType := header["Content-Type"]
		if !haveType && !hasTE {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if _, ok := header["Date"]; !ok {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// Conflicting framing information; Transfer-Encoding wins.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	// Decide body framing: none, Content-Length, chunked, or
	// close-delimited.
	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// No body will be sent; leave framing headers as-is.
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 without a known length:
		if hasTE && te == "identity" {
			// identity means no chunking; body ends when conn closes.
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// Default to chunked encoding.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
		}
	} else {
		// HTTP/1.0 with unknown length: close delimits the body.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding")
	}

	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	w.conn.buf.WriteString(statusLine(w.req, code))
	cw.header.WriteSubset(w.conn.buf, excludeHeader)
	setHeader.Write(w.conn.buf.Writer)
	w.conn.buf.Write(crlf)
}
| |
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	switch {
	case v == "":
		// Nothing to report.
	case !strings.Contains(v, ","):
		// Single element; no splitting needed.
		fn(v)
	default:
		for _, part := range strings.Split(v, ",") {
			if part = textproto.TrimString(part); part != "" {
				fn(part)
			}
		}
	}
}
| |
// statusLines is a cache of Status-Line strings, keyed by code (for
// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a
// map keyed by struct of two fields. This map's max size is bounded
// by 2*len(statusText), two protocol types for each known official
// status code in the statusText map.
var (
	statusMu    sync.RWMutex // guards statusLines
	statusLines = make(map[int]string)
)
| |
| // statusLine returns a response Status-Line (RFC 2616 Section 6.1) |
| // for the given request and response status code. |
| func statusLine(req *Request, code int) string { |
| |
| key := code |
| proto11 := req.ProtoAtLeast(1, 1) |
| if !proto11 { |
| key = -key |
| } |
| statusMu.RLock() |
| line, ok := statusLines[key] |
| statusMu.RUnlock() |
| if ok { |
| return line |
| } |
| |
| proto := "HTTP/1.0" |
| if proto11 { |
| proto = "HTTP/1.1" |
| } |
| codestring := strconv.Itoa(code) |
| text, ok := statusText[code] |
| if !ok { |
| text = "status code " + codestring |
| } |
| line = proto + " " + codestring + " " + text + "\r\n" |
| if ok { |
| statusMu.Lock() |
| defer statusMu.Unlock() |
| statusLines[key] = line |
| } |
| return line |
| } |
| |
| // bodyAllowed reports whether a Write is allowed for this response type. |
| // It's illegal to call this before the header has been flushed. |
| func (w *response) bodyAllowed() bool { |
| if !w.wroteHeader { |
| panic("") |
| } |
| return bodyAllowedForStatus(w.status) |
| } |
| |
// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
//    and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
//    and populates c.werr with it if so. but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2). The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}
| |
// WriteString is the string variant of Write, avoiding a []byte
// conversion by passing the string through to the shared write path.
func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}
| |
// write is the common implementation of Write and WriteString; either
// dataB or dataS is non-zero, and lenData is its length. It enforces the
// hijack, body-allowed, and declared Content-Length constraints before
// handing the bytes to the buffered writer.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		w.conn.server.logf("http: response.Write on hijacked connection")
		return 0, ErrHijacked
	}
	if !w.wroteHeader {
		// First write implies a 200 OK.
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	w.written += int64(lenData) // ignoring errors, for errorKludge
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}
| |
// finishRequest completes the response after the handler returns: it
// flushes all buffered layers in order, terminates any chunked body,
// recycles the handler-side bufio.Writer, and releases request resources.
func (w *response) finishRequest() {
	w.handlerDone = true

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	// Order matters: flush handler buffer into the chunkWriter, return
	// the buffer to the pool, write the chunked terminator/trailers,
	// then flush the connection buffer to the wire.
	w.w.Flush()
	putBufioWriter(w.w)
	w.cw.close()
	w.conn.buf.Flush()

	// Close the body (regardless of w.closeAfterReply) so we can
	// re-use its bufio.Reader later safely.
	w.req.Body.Close()

	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}
}
| |
| // shouldReuseConnection reports whether the underlying TCP connection can be reused. |
| // It must only be called after the handler is done executing. |
| func (w *response) shouldReuseConnection() bool { |
| if w.closeAfterReply { |
| |
| return false |
| } |
| |
| if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { |
| |
| return false |
| } |
| |
| if w.conn.werr != nil { |
| return false |
| } |
| |
| if w.closedRequestBodyEarly() { |
| return false |
| } |
| |
| return true |
| } |
| |
// closedRequestBodyEarly reports whether the request body was closed
// before reaching EOF (only detectable for the server's *body type).
func (w *response) closedRequestBodyEarly() bool {
	body, ok := w.req.Body.(*body)
	return ok && body.didEarlyClose()
}
| |
// Flush sends any buffered response data to the client, writing an
// implicit 200 OK header first if none has been written. The body
// writer is flushed before the chunk writer so data drains outward
// in layer order.
func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	w.w.Flush()
	w.cw.flush()
}
| |
// finalFlush drains the connection's read/write buffer and returns
// its bufio.Reader and bufio.Writer to their pools for reuse by a
// future connection. c.buf is nil afterward and must not be used.
func (c *conn) finalFlush() {
	if c.buf != nil {
		c.buf.Flush()

		// Return the bufio.Reader to the pool for reuse.
		putBufioReader(c.buf.Reader)

		// Likewise the bufio.Writer.
		putBufioWriter(c.buf.Writer)

		c.buf = nil
	}
}
| |
| // Close the connection. |
| func (c *conn) close() { |
| c.finalFlush() |
| if c.rwc != nil { |
| c.rwc.Close() |
| c.rwc = nil |
| } |
| } |
| |
// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

// closeWriter is implemented by connections that can close just their
// write side (sending a FIN) while leaving the read side open.
type closeWriter interface {
	CloseWrite() error
}

// Compile-time check that *net.TCPConn supports half-close.
var _ closeWriter = (*net.TCPConn)(nil)
| |
| // closeWrite flushes any outstanding data and sends a FIN packet (if |
| // client is connected via TCP), signalling that we're done. We then |
| // pause for a bit, hoping the client processes it before any |
| // subsequent RST. |
| // |
| // See https://golang.org/issue/3595 |
| func (c *conn) closeWriteAndWait() { |
| c.finalFlush() |
| if tcp, ok := c.rwc.(closeWriter); ok { |
| tcp.CloseWrite() |
| } |
| time.Sleep(rstAvoidanceDelay) |
| } |
| |
// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol. The empty string and the built-in
// HTTP/1.x protocol identifiers are blacklisted and can't be
// overridden with alternate implementations.
func validNPN(proto string) bool {
	return proto != "" && proto != "http/1.1" && proto != "http/1.0"
}
| |
| func (c *conn) setState(nc net.Conn, state ConnState) { |
| if hook := c.server.ConnState; hook != nil { |
| hook(nc, state) |
| } |
| } |
| |
| // Serve a new connection. |
| func (c *conn) serve() { |
| origConn := c.rwc |
| defer func() { |
| if err := recover(); err != nil { |
| const size = 64 << 10 |
| buf := make([]byte, size) |
| buf = buf[:runtime.Stack(buf, false)] |
| c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) |
| } |
| if !c.hijacked() { |
| c.close() |
| c.setState(origConn, StateClosed) |
| } |
| }() |
| |
| if tlsConn, ok := c.rwc.(*tls.Conn); ok { |
| |