// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server. See RFC 7230 through 7235.

package http

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/http/httpguts"
)

// Errors used by the HTTP server.
var (
	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
	// when the HTTP method or response code does not permit a
	// body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")

	// ErrHijacked is returned by ResponseWriter.Write calls when
	// the underlying connection has been hijacked using the
	// Hijacker interface. A zero-byte write on a hijacked
	// connection will return ErrHijacked without any other side
	// effects.
	ErrHijacked = errors.New("http: connection has been hijacked")

	// ErrContentLength is returned by ResponseWriter.Write calls
	// when a Handler set a Content-Length response header with a
	// declared size and then attempted to write more bytes than
	// declared.
	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")

	// Deprecated: ErrWriteAfterFlush is no longer returned by
	// anything in the net/http package. Callers should not
	// compare errors against this variable.
	ErrWriteAfterFlush = errors.New("unused")
)

// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and either closes the network connection or sends an HTTP/2
// RST_STREAM, depending on the HTTP protocol. To abort a handler so
// the client sees an interrupted response but the server doesn't log
// an error, panic with the value ErrAbortHandler.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
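
// An illustrative sketch of a minimal Handler implementation wired up with
// ListenAndServe, as a complete program. The helloHandler type name and the
// listen address are placeholders, not anything defined by this package.
//
//	package main
//
//	import (
//		"io"
//		"log"
//		"net/http"
//	)
//
//	type helloHandler struct{}
//
//	// ServeHTTP writes a fixed reply and returns, which finishes the request.
//	func (helloHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "hello\n")
//	}
//
//	func main() {
//		log.Fatal(http.ListenAndServe("localhost:8080", helloHandler{}))
//	}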

// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. The Header map also is the mechanism with which
	// Handlers can set HTTP trailers.
	//
	// Changing the header map after a call to WriteHeader (or
	// Write) has no effect unless the modified headers are
	// trailers.
	//
	// There are two ways to set Trailers. The preferred way is to
	// predeclare in the headers which trailers you will later
	// send by setting the "Trailer" header to the names of the
	// trailer keys which will come later. In this case, those
	// keys of the Header map are treated as if they were
	// trailers. See the example. The second way, for trailer
	// keys not known to the Handler until after the first Write,
	// is to prefix the Header map keys with the TrailerPrefix
	// constant value. See TrailerPrefix.
	//
	// To suppress automatic response headers (such as "Date"), set
	// their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	//
	// If WriteHeader has not yet been called, Write calls
	// WriteHeader(http.StatusOK) before writing the data. If the Header
	// does not contain a Content-Type line, Write adds a Content-Type set
	// to the result of passing the initial 512 bytes of written data to
	// DetectContentType. Additionally, if the total size of all written
	// data is under a few KB and there are no Flush calls, the
	// Content-Length header is added automatically.
	//
	// Depending on the HTTP protocol version and the client, calling
	// Write or WriteHeader may prevent future reads on the
	// Request.Body. For HTTP/1.x requests, handlers should read any
	// needed request body data before writing the response. Once the
	// headers have been flushed (due to either an explicit Flusher.Flush
	// call or writing enough data to trigger a flush), the request body
	// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
	// handlers to continue to read the request body while concurrently
	// writing the response. However, such behavior may not be supported
	// by all HTTP/2 clients. Handlers should read before writing if
	// possible to maximize compatibility.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with the provided
	// status code.
	//
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	//
	// The provided code must be a valid HTTP 1xx-5xx status code.
	// Only one header may be written. Go does not currently
	// support sending user-defined 1xx informational headers,
	// with the exception of the 100-continue response header that
	// the Server sends automatically when the Request.Body is read.
	WriteHeader(statusCode int)
}
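
// An illustrative sketch of typical ResponseWriter use inside a handler:
// read the request body first (for HTTP/1.x compatibility), set headers
// before the first Write, then send the status and body. Assumes the usual
// "fmt", "io/ioutil", and "net/http" imports; the handler name is a placeholder.
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		body, err := ioutil.ReadAll(r.Body) // read before writing the reply
//		if err != nil {
//			http.Error(w, "bad request body", http.StatusBadRequest)
//			return
//		}
//		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
//		w.WriteHeader(http.StatusOK) // header changes after this have no effect
//		fmt.Fprintf(w, "read %d bytes\n", len(body))
//	}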

// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}
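
// An illustrative sketch of testing for Flusher at runtime with a type
// assertion, as the doc comment above recommends. Assumes "fmt", "net/http",
// and "time" imports; the handler name is a placeholder.
//
//	func progressHandler(w http.ResponseWriter, r *http.Request) {
//		flusher, ok := w.(http.Flusher)
//		for i := 0; i < 5; i++ {
//			fmt.Fprintf(w, "step %d\n", i)
//			if ok {
//				flusher.Flush() // push the buffered chunk to the client now
//			}
//			time.Sleep(time.Second)
//		}
//	}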

// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	//
	// The returned bufio.Reader may contain unprocessed buffered
	// data from the client.
	//
	// After a call to Hijack, the original Request.Body must not
	// be used. The original Request's Context remains valid and
	// is not canceled until the Request's ServeHTTP method
	// returns.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}
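
// An illustrative sketch of hijacking a connection after testing for the
// interface, then taking responsibility for writing and closing it by hand.
// Assumes a "net/http" import; the handler name is a placeholder.
//
//	func rawHandler(w http.ResponseWriter, r *http.Request) {
//		hj, ok := w.(http.Hijacker)
//		if !ok {
//			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
//			return
//		}
//		conn, bufrw, err := hj.Hijack()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		defer conn.Close() // the server no longer manages the connection
//		bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nhi\n")
//		bufrw.Flush()
//	}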

// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
//
// Deprecated: the CloseNotifier interface predates Go's context package.
// New code should use Request.Context instead.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives at most a
	// single value (true) when the client connection has gone
	// away.
	//
	// CloseNotify may wait to notify until Request.Body has been
	// fully read.
	//
	// After the Handler has returned, there is no guarantee
	// that the channel receives a value.
	//
	// If the protocol is HTTP/1.1 and CloseNotify is called while
	// processing an idempotent request (such as a GET) while
	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
	// pipelined request may cause a value to be sent on the
	// returned channel. In practice HTTP/1.1 pipelining is not
	// enabled in browsers and not seen often in the wild. If this
	// is a problem, use HTTP/2 or only use CloseNotify on methods
	// such as POST.
	CloseNotify() <-chan bool
}
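
// An illustrative sketch of the recommended replacement for CloseNotifier:
// watching Request.Context for cancellation when the client goes away.
// Assumes "net/http" and "time" imports; the handler name is a placeholder.
//
//	func slowHandler(w http.ResponseWriter, r *http.Request) {
//		select {
//		case <-time.After(10 * time.Second):
//			w.Write([]byte("done\n"))
//		case <-r.Context().Done():
//			// The client disconnected (or the server canceled the request);
//			// stop the long operation instead of finishing the reply.
//			return
//		}
//	}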

var (
	// ServerContextKey is a context key. It can be used in HTTP
	// handlers with Context.Value to access the server that
	// started the handler. The associated value will be of
	// type *Server.
	ServerContextKey = &contextKey{"http-server"}

	// LocalAddrContextKey is a context key. It can be used in
	// HTTP handlers with Context.Value to access the local
	// address the connection arrived on.
	// The associated value will be of type net.Addr.
	LocalAddrContextKey = &contextKey{"local-addr"}
)
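
// An illustrative sketch of reading both context keys from a request's
// context inside a handler served by a Server. Assumes "fmt", "net", and
// "net/http" imports; the handler name is a placeholder.
//
//	func infoHandler(w http.ResponseWriter, r *http.Request) {
//		srv := r.Context().Value(http.ServerContextKey).(*http.Server)
//		addr := r.Context().Value(http.LocalAddrContextKey).(net.Addr)
//		fmt.Fprintf(w, "served by %q on %s\n", srv.Addr, addr)
//	}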

// A conn represents the server side of an HTTP connection.
type conn struct {
	// server is the server on which the connection arrived.
	// Immutable; never nil.
	server *Server

	// cancelCtx cancels the connection-level context.
	cancelCtx context.CancelFunc

	// rwc is the underlying network connection.
	// This is never wrapped by other types and is the value given out
	// to CloseNotifier callers. It is usually of type *net.TCPConn or
	// *tls.Conn.
	rwc net.Conn

	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
	// inside the Listener's Accept goroutine, as some implementations block.
	// It is populated immediately inside the (*conn).serve goroutine.
	// This is the value of a Handler's (*Request).RemoteAddr.
	remoteAddr string

	// tlsState is the TLS connection state when using TLS.
	// nil means not TLS.
	tlsState *tls.ConnectionState

	// werr is set to the first write error to rwc.
	// It is set via checkConnErrorWriter{w}, where bufw writes.
	werr error

	// r is bufr's read source. It's a wrapper around rwc that provides
	// io.LimitedReader-style limiting (while reading request headers)
	// and functionality to support CloseNotifier. See *connReader docs.
	r *connReader

	// bufr reads from r.
	bufr *bufio.Reader

	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
	bufw *bufio.Writer

	// lastMethod is the method of the most recent request
	// on this connection, if any.
	lastMethod string

	curReq atomic.Value // of *response (which has a Request in it)

	curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState))

	// mu guards hijackedv
	mu sync.Mutex

	// hijackedv is whether this connection has been hijacked
	// by a Handler with the Hijacker interface.
	// It is guarded by mu.
	hijackedv bool
}

func (c *conn) hijacked() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.hijackedv
}

// c.mu must be held.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if c.hijackedv {
		return nil, nil, ErrHijacked
	}
	c.r.abortPendingRead()

	c.hijackedv = true
	rwc = c.rwc
	rwc.SetDeadline(time.Time{})

	buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
	if c.r.hasByte {
		if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
			return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
		}
	}
	c.setState(rwc, StateHijacked)
	return
}

// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048

// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
	res *response

	// header is either nil or a deep clone of res.handlerHeader
	// at the time of res.writeHeader, if res.writeHeader is
	// called and extra buffering is being done to calculate
	// Content-Type and/or Content-Length.
	header Header

	// wroteHeader tells whether the header's been written to "the
	// wire" (or rather: w.conn.buf). This is unlike
	// (*response).wroteHeader, which tells only whether it was
	// logically written.
	wroteHeader bool

	// set by the writeHeader method:
	chunking bool // using chunked transfer encoding for reply body
}

var (
	crlf       = []byte("\r\n")
	colonSpace = []byte(": ")
)

func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// Eat writes.
		return len(p), nil
	}
	if cw.chunking {
		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.bufw.Write(p)
	if cw.chunking && err == nil {
		_, err = cw.res.conn.bufw.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}

func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.bufw.Flush()
}

func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.bufw // conn's bufio writer
		// zero chunk to mark EOF
		bw.WriteString("0\r\n")
		if trailers := cw.res.finalTrailers(); trailers != nil {
			trailers.Write(bw) // the writer handles noting errors
		}
		// final blank line after the trailers (whether
		// present or not)
		bw.WriteString("\r\n")
	}
}

// A response represents the server side of an HTTP response.
type response struct {
	conn             *conn
	req              *Request // request for this response
	reqBody          io.ReadCloser
	cancelCtx        context.CancelFunc // when ServeHTTP exits
	wroteHeader      bool               // reply header has been (logically) written
	wroteContinue    bool               // 100 Continue response was written
	wants10KeepAlive bool               // HTTP/1.0 w/ Connection "keep-alive"
	wantsClose       bool               // HTTP request has Connection "close"

	w  *bufio.Writer // buffers output in chunks to chunkWriter
	cw chunkWriter

	// handlerHeader is the Header that Handlers get access to,
	// which may be retained and mutated even after WriteHeader.
	// handlerHeader is copied into cw.header at WriteHeader
	// time, and privately mutated thereafter.
	handlerHeader Header
	calledHeader  bool // handler accessed handlerHeader via Header

	written       int64 // number of bytes written in body
	contentLength int64 // explicitly-declared Content-Length; or -1
	status        int   // status code passed to WriteHeader

	// close connection after this reply.  set on request and
	// updated after response from handler if there's a
	// "Connection: keep-alive" response header and a
	// Content-Length.
	closeAfterReply bool

	// requestBodyLimitHit is set by requestTooLarge when
	// maxBytesReader hits its max size. It is checked in
	// WriteHeader, to make sure we don't consume the
	// remaining request body to try to advance to the next HTTP
	// request. Instead, when this is set, we stop reading
	// subsequent requests on this connection and stop reading
	// input from it.
	requestBodyLimitHit bool

	// trailers are the headers to be sent after the handler
	// finishes writing the body. This field is initialized from
	// the Trailer response header when the response header is
	// written.
	trailers []string

	handlerDone atomicBool // set true when the handler exits

	// Buffers for Date, Content-Length, and status code
	dateBuf   [len(TimeFormat)]byte
	clenBuf   [10]byte
	statusBuf [3]byte

	// closeNotifyCh is the channel returned by CloseNotify.
	// TODO(bradfitz): this is currently (for Go 1.8) always
	// non-nil. Make this lazily-created again as it used to be?
	closeNotifyCh  chan bool
	didCloseNotify int32 // atomic (only 0->1 winner should send)
}

// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
//    https://golang.org/pkg/net/http/#ResponseWriter
//    https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
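
// An illustrative sketch of sending a trailer whose value is only known
// after the first Write, using the TrailerPrefix mechanism described above.
// Assumes "fmt" and "net/http" imports; the handler and trailer names are
// placeholders.
//
//	func checksumHandler(w http.ResponseWriter, r *http.Request) {
//		sum := 0
//		for i := 0; i < 3; i++ {
//			n, _ := fmt.Fprintf(w, "chunk %d\n", i) // body already started
//			sum += n
//		}
//		// The header is already written, so set the trailer by prefixed key;
//		// the prefix is stripped and "X-Body-Bytes" is sent as a trailer.
//		w.Header().Set(http.TrailerPrefix+"X-Body-Bytes", fmt.Sprint(sum))
//	}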

// finalTrailers is called after the Handler exits and returns a non-nil
// value if the Handler set any trailers.
func (w *response) finalTrailers() Header {
	var t Header
	for k, vv := range w.handlerHeader {
		if strings.HasPrefix(k, TrailerPrefix) {
			if t == nil {
				t = make(Header)
			}
			t[strings.TrimPrefix(k, TrailerPrefix)] = vv
		}
	}
	for _, k := range w.trailers {
		if t == nil {
			t = make(Header)
		}
		for _, v := range w.handlerHeader[k] {
			t.Add(k, v)
		}
	}
	return t
}

type atomicBool int32

func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }

// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (w *response) declareTrailer(k string) {
	k = CanonicalHeaderKey(k)
	if !httpguts.ValidTrailerHeader(k) {
		// Forbidden by RFC 7230, section 4.1.2
		return
	}
	w.trailers = append(w.trailers, k)
}

// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
	w.closeAfterReply = true
	w.requestBodyLimitHit = true
	if !w.wroteHeader {
		w.Header().Set("Connection", "close")
	}
}

// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
	_, haveType := w.handlerHeader["Content-Type"]
	return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}

// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
	io.Writer
}

func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
	switch v := src.(type) {
	case *os.File:
		fi, err := v.Stat()
		if err != nil {
			return false, err
		}
		return fi.Mode().IsRegular(), nil
	case *io.LimitedReader:
		return srcIsRegularFile(v.R)
	default:
		return
	}
}

// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	// Our underlying w.conn.rwc is usually a *TCPConn (with its
	// own ReadFrom method). If not, or if our src isn't a regular
	// file, just fall back to the normal copy method.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	regFile, err := srcIsRegularFile(src)
	if err != nil {
		return 0, err
	}
	if !ok || !regFile {
		bufp := copyBufPool.Get().(*[]byte)
		defer copyBufPool.Put(bufp)
		return io.CopyBuffer(writerOnly{w}, src, *bufp)
	}

	// sendfile path:

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	if w.needsSniff() {
		n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
		n += n0
		if err != nil {
			return n, err
		}
	}

	w.w.Flush()  // get rid of any previous writes
	w.cw.flush() // make sure Header is written; flush data to rwc

	// Now that cw has been flushed, its chunking field is guaranteed initialized.
	if !w.cw.chunking && w.bodyAllowed() {
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}

	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}

// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false

// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
	c := &conn{
		server: srv,
		rwc:    rwc,
	}
	if debugServerConnections {
		c.rwc = newLoggingConn("server", c.rwc)
	}
	return c
}

type readResult struct {
	n   int
	err error
	b   byte // byte read, if n == 1
}

// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
	conn *conn

	mu      sync.Mutex // guards following
	hasByte bool
	byteBuf [1]byte
	cond    *sync.Cond
	inRead  bool
	aborted bool  // set true before conn.rwc deadline is set to past
	remain  int64 // bytes remaining
}

func (cr *connReader) lock() {
	cr.mu.Lock()
	if cr.cond == nil {
		cr.cond = sync.NewCond(&cr.mu)
	}
}

func (cr *connReader) unlock() { cr.mu.Unlock() }

func (cr *connReader) startBackgroundRead() {
	cr.lock()
	defer cr.unlock()
	if cr.inRead {
		panic("invalid concurrent Body.Read call")
	}
	if cr.hasByte {
		return
	}
	cr.inRead = true
	cr.conn.rwc.SetReadDeadline(time.Time{})
	go cr.backgroundRead()
}

func (cr *connReader) backgroundRead() {
	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
	cr.lock()
	if n == 1 {
		cr.hasByte = true
		// We were past the end of the previous request's body already
		// (since we wouldn't be in a background read otherwise), so
		// this is a pipelined HTTP request. Prior to Go 1.11 we used to
		// send on the CloseNotify channel and cancel the context here,
		// but the behavior was documented as only "may", and we only
		// did that because that's how CloseNotify accidentally behaved
		// in very early Go releases prior to context support. Once we
		// added context support, people used a Handler's
		// Request.Context() and passed it along. Having that context
		// cancel on pipelined HTTP requests caused problems.
		// Fortunately, almost nothing uses HTTP/1.x pipelining.
		// Unfortunately, apt-get does, or sometimes does.
		// New Go 1.11 behavior: don't fire CloseNotify or cancel
		// contexts on pipelined requests. Shouldn't affect people, but
		// fixes cases like Issue 23921. This does mean that a client
		// closing their TCP connection after sending a pipelined
		// request won't cancel the context, but we'll catch that on any
		// write failure (in checkConnErrorWriter.Write).
		// If the server never writes, yes, there are still contrived
		// server & client behaviors where this fails to ever cancel the
		// context, but that's kinda why HTTP/1.x pipelining died
		// anyway.
	}
	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
		// Ignore this error. It's the expected error from
		// another goroutine calling abortPendingRead.
	} else if err != nil {
		cr.handleReadError(err)
	}
	cr.aborted = false
	cr.inRead = false
	cr.unlock()
	cr.cond.Broadcast()
}

func (cr *connReader) abortPendingRead() {
	cr.lock()
	defer cr.unlock()
	if !cr.inRead {
		return
	}
	cr.aborted = true
	cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
	for cr.inRead {
		cr.cond.Wait()
	}
	cr.conn.rwc.SetReadDeadline(time.Time{})
}

func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
func (cr *connReader) setInfiniteReadLimit()     { cr.remain = maxInt64 }
func (cr *connReader) hitReadLimit() bool        { return cr.remain <= 0 }

// handleReadError is called whenever a Read from the client returns a
// non-nil error.
//
// The provided non-nil err is almost always io.EOF or a "use of
// closed network connection". In any case, the error is not
// particularly interesting, except perhaps for debugging during
// development. Any error means the connection is dead and we should
// cancel its context.
//
// It may be called from multiple goroutines.
func (cr *connReader) handleReadError(_ error) {
	cr.conn.cancelCtx()
	cr.closeNotify()
}

// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
	res, _ := cr.conn.curReq.Load().(*response)
	if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
		res.closeNotifyCh <- true
	}
}

func (cr *connReader) Read(p []byte) (n int, err error) {
	cr.lock()
	if cr.inRead {
		cr.unlock()
		if cr.conn.hijacked() {
			panic("invalid Body.Read call. After hijacked, the original Request must not be used")
		}
		panic("invalid concurrent Body.Read call")
	}
	if cr.hitReadLimit() {
		cr.unlock()
		return 0, io.EOF
	}
	if len(p) == 0 {
		cr.unlock()
		return 0, nil
	}
	if int64(len(p)) > cr.remain {
		p = p[:cr.remain]
	}
	if cr.hasByte {
		p[0] = cr.byteBuf[0]
		cr.hasByte = false
		cr.unlock()
		return 1, nil
	}
	cr.inRead = true
	cr.unlock()
	n, err = cr.conn.rwc.Read(p)

	cr.lock()
	cr.inRead = false
	if err != nil {
		cr.handleReadError(err)
	}
	cr.remain -= int64(n)
	cr.unlock()

	cr.cond.Broadcast()
	return n, err
}

var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)

var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

func bufioWriterPool(size int) *sync.Pool {
	switch size {
	case 2 << 10:
		return &bufioWriter2kPool
	case 4 << 10:
		return &bufioWriter4kPool
	}
	return nil
}

func newBufioReader(r io.Reader) *bufio.Reader {
	if v := bufioReaderPool.Get(); v != nil {
		br := v.(*bufio.Reader)
		br.Reset(r)
		return br
	}
	// Note: if this reader size is ever changed, update
	// TestHandlerBodyClose's assumptions.
	return bufio.NewReader(r)
}

func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}

func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	pool := bufioWriterPool(size)
	if pool != nil {
		if v := pool.Get(); v != nil {
			bw := v.(*bufio.Writer)
			bw.Reset(w)
			return bw
		}
	}
	return bufio.NewWriterSize(w, size)
}

func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	if pool := bufioWriterPool(bw.Available()); pool != nil {
		pool.Put(bw)
	}
}

// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
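
// An illustrative sketch of overriding the 1 MB default by configuring a
// Server directly rather than using the package-level helpers. Assumes
// "log", "net/http", and "time" imports; the field values are arbitrary
// examples.
//
//	srv := &http.Server{
//		Addr:           ":8080",
//		MaxHeaderBytes: 64 << 10, // cap request headers at 64 KB
//		ReadTimeout:    10 * time.Second,
//	}
//	log.Fatal(srv.ListenAndServe())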

func (srv *Server) maxHeaderBytes() int {
	if srv.MaxHeaderBytes > 0 {
		return srv.MaxHeaderBytes
	}
	return DefaultMaxHeaderBytes
}

func (srv *Server) initialReadLimitSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}

// expectContinueReader is a wrapper around io.ReadCloser which, on first
// read, sends an HTTP/1.1 100 Continue header.
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser
	closed     bool
	sawEOF     bool
}

func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		ecr.resp.wroteContinue = true
		ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.bufw.Flush()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF = true
	}
	return
}

func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}

// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
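
// An illustrative sketch of producing and parsing a header timestamp with
// TimeFormat. The time must be converted to UTC before formatting, as the
// doc comment above notes. Assumes "fmt", "net/http", and "time" imports.
//
//	stamp := time.Now().UTC().Format(http.TimeFormat)
//	fmt.Println(stamp) // e.g. "Mon, 02 Jan 2006 15:04:05 GMT"
//
//	when, err := http.ParseTime(stamp)
//	if err != nil {
//		// handle a malformed date
//	}
//	_ = when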

// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
	const days = "SunMonTueWedThuFriSat"
	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"

	t = t.UTC()
	yy, mm, dd := t.Date()
	hh, mn, ss := t.Clock()
	day := days[3*t.Weekday():]
	mon := months[3*(mm-1):]

	return append(b,
		day[0], day[1], day[2], ',', ' ',
		byte('0'+dd/10), byte('0'+dd%10), ' ',
		mon[0], mon[1], mon[2], ' ',
		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
		byte('0'+hh/10), byte('0'+hh%10), ':',
		byte('0'+mn/10), byte('0'+mn%10), ':',
		byte('0'+ss/10), byte('0'+ss%10), ' ',
		'G', 'M', 'T')
}

var errTooLarge = errors.New("http: request too large")

// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	var (
		wholeReqDeadline time.Time // or zero if none
		hdrDeadline      time.Time // or zero if none
	)
	t0 := time.Now()
	if d := c.server.readHeaderTimeout(); d != 0 {
		hdrDeadline = t0.Add(d)
	}
	if d := c.server.ReadTimeout; d != 0 {
		wholeReqDeadline = t0.Add(d)
	}
	c.rwc.SetReadDeadline(hdrDeadline)
	if d := c.server.WriteTimeout; d != 0 {
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	c.r.setReadLimit(c.server.initialReadLimitSize())
	if c.lastMethod == "POST" {
		// RFC 7230 section 3 tolerance for old buggy clients.
		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
		c.bufr.Discard(numLeadingCRorLF(peek))
	}
	req, err := readRequest(c.bufr, keepHostHeader)
	if err != nil {
		if c.r.hitReadLimit() {
			return nil, errTooLarge
		}
		return nil, err
	}

	if !http1ServerSupportsRequest(req) {
		return nil, badRequestError("unsupported protocol version")
	}

	c.lastMethod = req.Method
	c.r.setInfiniteReadLimit()

	hosts, haveHost := req.Header["Host"]
	isH2Upgrade := req.isH2Upgrade()
	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
		return nil, badRequestError("missing required Host header")
	}
	if len(hosts) > 1 {
		return nil, badRequestError("too many Host headers")
	}
	if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
		return nil, badRequestError("malformed Host header")
	}
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, badRequestError("invalid header name")
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, badRequestError("invalid header value")
			}
		}
	}
	delete(req.Header, "Host")

	ctx, cancelCtx := context.WithCancel(ctx)
	req.ctx = ctx
	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	// Adjust the read deadline if necessary.
	if !hdrDeadline.Equal(wholeReqDeadline) {
		c.rwc.SetReadDeadline(wholeReqDeadline)
	}

	w = &response{
		conn:          c,
		cancelCtx:     cancelCtx,
		req:           req,
		reqBody:       req.Body,
		handlerHeader: make(Header),
		contentLength: -1,
		closeNotifyCh: make(chan bool, 1),

		// We populate these ahead of time so we're not
		// reading from req.Header after their Handler starts
		// and maybe mutates it (Issue 14940)
		wants10KeepAlive: req.wantsHttp10KeepAlive(),
		wantsClose:       req.wantsClose(),
	}
	if isH2Upgrade {
		w.closeAfterReply = true
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}

// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
	if req.ProtoMajor == 1 {
		return true
	}
	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
	// wire up their own HTTP/2 upgrades.
	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
		req.Method == "PRI" && req.RequestURI == "*" {
		return true
	}
	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
	// aren't encoded in ASCII anyway).
	return false
}

func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.Clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}

// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway.  (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10

func checkWriteHeaderCode(code int) {
	// Issue 22880: require valid WriteHeader status codes.
	// For now we only enforce that it's three digits.
	// In the future we might block things over 599 (600 and above aren't defined
	// at https://httpwg.org/specs/rfc7231.html#status.codes)
	// and we might block under 200 (once we have more mature 1xx support).
	// But for now any three digits.
	//
	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
	// no equivalent bogus thing we can realistically send in HTTP/2,
	// so we'll consistently panic instead and help people find their bugs
	// early. (We can't return an error from WriteHeader even if we wanted to.)
	if code < 100 || code > 999 {
		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
	}
}

// relevantCaller searches the call stack for the first function outside of net/http.
// The purpose of this function is to provide more helpful error messages.
func relevantCaller() runtime.Frame {
	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc)
	frames := runtime.CallersFrames(pc[:n])
	prefix1 := "net/http."
	prefix2 := "net/http."
	if runtime.Compiler == "gccgo" {
		prefix2 = "http."
	}
	var frame runtime.Frame
	for {
		var more bool
		// Assign (don't redeclare) so that, if the stack is exhausted, the
		// frame returned below is the last one examined rather than the
		// zero value.
		frame, more = frames.Next()
		if !strings.HasPrefix(frame.Function, prefix1) && !strings.HasPrefix(frame.Function, prefix2) {
			return frame
		}
		if !more {
			break
		}
	}
	return frame
}

func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		caller := relevantCaller()
		w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	if w.wroteHeader {
		caller := relevantCaller()
		w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	checkWriteHeaderCode(code)
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		w.cw.header = w.handlerHeader.Clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}

// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}

// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}

var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)

// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
	if h.date != nil {
		w.Write(headerDate)
		w.Write(h.date)
		w.Write(crlf)
	}
	if h.contentLength != nil {
		w.Write(headerContentLength)
		w.Write(h.contentLength)
		w.Write(crlf)
	}
	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
		if v != "" {
			w.Write(extraHeaderKeys[i])
			w.Write(colonSpace)
			w.WriteString(v)
			w.Write(crlf)
		}
	}
}

// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
	trailers := false
	for k := range cw.header {
		if strings.HasPrefix(k, TrailerPrefix) {
			if excludeHeader == nil {
				excludeHeader = make(map[string]bool)
			}
			excludeHeader[k] = true
			trailers = true
		}
	}
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// If the client wanted a 100-continue but we never sent it to
	// them (or, more strictly: we never finished reading their
	// request body), don't reuse this connection because it's now
	// in an unknown state: we might be sending this response at
	// the same time the client is now sending its request body
	// after a timeout.  (Some HTTP clients send Expect:
	// 100-continue but knowing that some servers don't support
	// it, the clients set a timer and send the body later anyway)
	// If we haven't seen EOF, we can't skip over the unread body
	// because we don't know if the next bytes on the wire will be
	// the body-following-the-timer or the subsequent request.
	// See Issue 11549.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so. But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
	// about HTTP/1.x Handlers concurrently reading and writing, like
	// HTTP/2 handlers can do. Maybe this code should be relaxed?
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed in handler with non-EOF error.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// There must be even more data left over.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body was already consumed and closed.
			case io.EOF:
				// The remaining body was just consumed, close it.
				err = w.reqBody.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other kind of error occurred, like a read timeout, or
				// corrupt chunked encoding. In any case, whatever remains
				// on the wire must not be parsed as another HTTP request.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]
		if !haveType && !hasTE && len(p) > 0 {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if !header.has("Date") {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// do nothing
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
		// content-length has been provided. The connection must be closed after the
		// reply is written, and no chunking is to be done. This is the setup
		// recommended in the Server-Sent Events candidate recommendation 11,
		// section 8.
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// HTTP/1.1 or greater: use chunked transfer encoding
			// to avoid closing the connection at EOF.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
			if hasTE && te == "chunked" {
				// We will send the chunked Transfer-Encoding header later.
				delHeader("Transfer-Encoding")
			}
		}
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
	setHeader.Write(w.conn.bufw)
	w.conn.bufw.Write(crlf)
}

// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	if v == "" {
		return
	}
	if !strings.Contains(v, ",") {
		fn(v)
		return
	}
	for _, f := range strings.Split(v, ",") {
		if f = textproto.TrimString(f); f != "" {
			fn(f)
		}
	}
}

// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
	if is11 {
		bw.WriteString("HTTP/1.1 ")
	} else {
		bw.WriteString("HTTP/1.0 ")
	}
	if text, ok := statusText[code]; ok {
		bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
		bw.WriteByte(' ')
		bw.WriteString(text)
		bw.WriteString("\r\n")
	} else {
		// don't worry about performance
		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
	}
}

// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
	if !w.wroteHeader {
|  | panic("") | 
|  | } | 
|  | return bodyAllowedForStatus(w.status) | 
|  | } | 
|  |  | 
|  | // The Life Of A Write is like this: | 
|  | // | 
|  | // Handler starts. No header has been sent. The handler can either | 
|  | // write a header, or just start writing. Writing before sending a header | 
|  | // sends an implicitly empty 200 OK header. | 
|  | // | 
|  | // If the handler didn't declare a Content-Length up front, we either | 
|  | // go into chunking mode or, if the handler finishes running before | 
|  | // the chunking buffer size, we compute a Content-Length and send that | 
|  | // in the header instead. | 
|  | // | 
|  | // Likewise, if the handler didn't set a Content-Type, we sniff that | 
|  | // from the initial chunk of output. | 
|  | // | 
|  | // The Writers are wired together like: | 
|  | // | 
|  | // 1. *response (the ResponseWriter) -> | 
|  | // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes | 
|  | // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) | 
|  | //    and which writes the chunk headers, if needed. | 
|  | // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> | 
|  | // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write | 
//    and populates c.werr with it if so, but otherwise writes to:
|  | // 6. the rwc, the net.Conn. | 
|  | // | 
|  | // TODO(bradfitz): short-circuit some of the buffering when the | 
|  | // initial header contains both a Content-Type and Content-Length. | 
|  | // Also short-circuit in (1) when the header's been sent and not in | 
|  | // chunking mode, writing directly to (4) instead, if (2) has no | 
|  | // buffered data. More generally, we could short-circuit from (1) to | 
|  | // (3) even in chunking mode if the write size from (1) is over some | 
|  | // threshold and nothing is in (2).  The answer might be mostly making | 
|  | // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal | 
|  | // with this instead. | 
|  | func (w *response) Write(data []byte) (n int, err error) { | 
|  | return w.write(len(data), data, "") | 
|  | } | 
|  |  | 
|  | func (w *response) WriteString(data string) (n int, err error) { | 
|  | return w.write(len(data), nil, data) | 
|  | } | 
|  |  | 
// Either dataB or dataS carries the data to write; the other is left as
// its zero value.
|  | func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { | 
|  | if w.conn.hijacked() { | 
|  | if lenData > 0 { | 
|  | caller := relevantCaller() | 
|  | w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line) | 
|  | } | 
|  | return 0, ErrHijacked | 
|  | } | 
|  | if !w.wroteHeader { | 
|  | w.WriteHeader(StatusOK) | 
|  | } | 
|  | if lenData == 0 { | 
|  | return 0, nil | 
|  | } | 
|  | if !w.bodyAllowed() { | 
|  | return 0, ErrBodyNotAllowed | 
|  | } | 
|  |  | 
|  | w.written += int64(lenData) // ignoring errors, for errorKludge | 
|  | if w.contentLength != -1 && w.written > w.contentLength { | 
|  | return 0, ErrContentLength | 
|  | } | 
|  | if dataB != nil { | 
|  | return w.w.Write(dataB) | 
|  | } else { | 
|  | return w.w.WriteString(dataS) | 
|  | } | 
|  | } | 
|  |  | 
|  | func (w *response) finishRequest() { | 
|  | w.handlerDone.setTrue() | 
|  |  | 
|  | if !w.wroteHeader { | 
|  | w.WriteHeader(StatusOK) | 
|  | } | 
|  |  | 
|  | w.w.Flush() | 
|  | putBufioWriter(w.w) | 
|  | w.cw.close() | 
|  | w.conn.bufw.Flush() | 
|  |  | 
|  | w.conn.r.abortPendingRead() | 
|  |  | 
|  | // Close the body (regardless of w.closeAfterReply) so we can | 
|  | // re-use its bufio.Reader later safely. | 
|  | w.reqBody.Close() | 
|  |  | 
|  | if w.req.MultipartForm != nil { | 
|  | w.req.MultipartForm.RemoveAll() | 
|  | } | 
|  | } | 
|  |  | 
|  | // shouldReuseConnection reports whether the underlying TCP connection can be reused. | 
|  | // It must only be called after the handler is done executing. | 
|  | func (w *response) shouldReuseConnection() bool { | 
|  | if w.closeAfterReply { | 
|  | // The request or something set while executing the | 
|  | // handler indicated we shouldn't reuse this | 
|  | // connection. | 
|  | return false | 
|  | } | 
|  |  | 
|  | if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { | 
|  | // Did not write enough. Avoid getting out of sync. | 
|  | return false | 
|  | } | 
|  |  | 
|  | // There was some error writing to the underlying connection | 
|  | // during the request, so don't re-use this conn. | 
|  | if w.conn.werr != nil { | 
|  | return false | 
|  | } | 
|  |  | 
|  | if w.closedRequestBodyEarly() { | 
|  | return false | 
|  | } | 
|  |  | 
|  | return true | 
|  | } | 
|  |  | 
|  | func (w *response) closedRequestBodyEarly() bool { | 
|  | body, ok := w.req.Body.(*body) | 
|  | return ok && body.didEarlyClose() | 
|  | } | 
|  |  | 
|  | func (w *response) Flush() { | 
|  | if !w.wroteHeader { | 
|  | w.WriteHeader(StatusOK) | 
|  | } | 
|  | w.w.Flush() | 
|  | w.cw.flush() | 
|  | } | 
|  |  | 
|  | func (c *conn) finalFlush() { | 
|  | if c.bufr != nil { | 
|  | // Steal the bufio.Reader (~4KB worth of memory) and its associated | 
|  | // reader for a future connection. | 
|  | putBufioReader(c.bufr) | 
|  | c.bufr = nil | 
|  | } | 
|  |  | 
|  | if c.bufw != nil { | 
|  | c.bufw.Flush() | 
|  | // Steal the bufio.Writer (~4KB worth of memory) and its associated | 
|  | // writer for a future connection. | 
|  | putBufioWriter(c.bufw) | 
|  | c.bufw = nil | 
|  | } | 
|  | } | 
|  |  | 
|  | // Close the connection. | 
|  | func (c *conn) close() { | 
|  | c.finalFlush() | 
|  | c.rwc.Close() | 
|  | } | 
|  |  | 
|  | // rstAvoidanceDelay is the amount of time we sleep after closing the | 
|  | // write side of a TCP connection before closing the entire socket. | 
|  | // By sleeping, we increase the chances that the client sees our FIN | 
|  | // and processes its final data before they process the subsequent RST | 
|  | // from closing a connection with known unread data. | 
|  | // This RST seems to occur mostly on BSD systems. (And Windows?) | 
|  | // This timeout is somewhat arbitrary (~latency around the planet). | 
|  | const rstAvoidanceDelay = 500 * time.Millisecond | 
|  |  | 
|  | type closeWriter interface { | 
|  | CloseWrite() error | 
|  | } | 
|  |  | 
|  | var _ closeWriter = (*net.TCPConn)(nil) | 
|  |  | 
|  | // closeWrite flushes any outstanding data and sends a FIN packet (if | 
|  | // client is connected via TCP), signalling that we're done. We then | 
|  | // pause for a bit, hoping the client processes it before any | 
|  | // subsequent RST. | 
|  | // | 
|  | // See https://golang.org/issue/3595 | 
|  | func (c *conn) closeWriteAndWait() { | 
|  | c.finalFlush() | 
|  | if tcp, ok := c.rwc.(closeWriter); ok { | 
|  | tcp.CloseWrite() | 
|  | } | 
|  | time.Sleep(rstAvoidanceDelay) | 
|  | } | 
|  |  | 
|  | // validNPN reports whether the proto is not a blacklisted Next | 
|  | // Protocol Negotiation protocol. Empty and built-in protocol types | 
|  | // are blacklisted and can't be overridden with alternate | 
|  | // implementations. | 
|  | func validNPN(proto string) bool { | 
|  | switch proto { | 
|  | case "", "http/1.1", "http/1.0": | 
|  | return false | 
|  | } | 
|  | return true | 
|  | } | 
|  |  | 
|  | func (c *conn) setState(nc net.Conn, state ConnState) { | 
|  | srv := c.server | 
|  | switch state { | 
|  | case StateNew: | 
|  | srv.trackConn(c, true) | 
|  | case StateHijacked, StateClosed: | 
|  | srv.trackConn(c, false) | 
|  | } | 
|  | if state > 0xff || state < 0 { | 
|  | panic("internal error") | 
|  | } | 
|  | packedState := uint64(time.Now().Unix()<<8) | uint64(state) | 
|  | atomic.StoreUint64(&c.curState.atomic, packedState) | 
|  | if hook := srv.ConnState; hook != nil { | 
|  | hook(nc, state) | 
|  | } | 
|  | } | 
|  |  | 
|  | func (c *conn) getState() (state ConnState, unixSec int64) { | 
|  | packedState := atomic.LoadUint64(&c.curState.atomic) | 
|  | return ConnState(packedState & 0xff), int64(packedState >> 8) | 
|  | } | 
|  |  | 
// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
|  | type badRequestError string | 
|  |  | 
|  | func (e badRequestError) Error() string { return "Bad Request: " + string(e) } | 
|  |  | 
|  | // ErrAbortHandler is a sentinel panic value to abort a handler. | 
|  | // While any panic from ServeHTTP aborts the response to the client, | 
|  | // panicking with ErrAbortHandler also suppresses logging of a stack | 
|  | // trace to the server's error log. | 
|  | var ErrAbortHandler = errors.New("net/http: abort Handler") | 
|  |  | 
|  | // isCommonNetReadError reports whether err is a common error | 
|  | // encountered during reading a request off the network when the | 
|  | // client has gone away or had its read fail somehow. This is used to | 
// determine which errors are interesting enough to log about.
|  | func isCommonNetReadError(err error) bool { | 
|  | if err == io.EOF { | 
|  | return true | 
|  | } | 
|  | if neterr, ok := err.(net.Error); ok && neterr.Timeout() { | 
|  | return true | 
|  | } | 
|  | if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { | 
|  | return true | 
|  | } | 
|  | return false | 
|  | } | 
|  |  | 
|  | // Serve a new connection. | 
|  | func (c *conn) serve(ctx context.Context) { | 
|  | c.remoteAddr = c.rwc.RemoteAddr().String() | 
|  | ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr()) | 
|  | defer func() { | 
|  | if err := recover(); err != nil && err != ErrAbortHandler { | 
|  | const size = 64 << 10 | 
|  | buf := make([]byte, size) | 
|  | buf = buf[:runtime.Stack(buf, false)] | 
|  | c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) | 
|  | } | 
|  | if !c.hijacked() { | 
|  | c.close() | 
|  | c.setState(c.rwc, StateClosed) | 
|  | } | 
|  | }() | 
|  |  | 
|  | if tlsConn, ok := c.rwc.(*tls.Conn); ok { | 
|  | if d := c.server.ReadTimeout; d != 0 { | 
|  | c.rwc.SetReadDeadline(time.Now().Add(d)) | 
|  | } | 
|  | if d := c.server.WriteTimeout; d != 0 { | 
|  | c.rwc.SetWriteDeadline(time.Now().Add(d)) | 
|  | } | 
|  | if err := tlsConn.Handshake(); err != nil { | 
|  | // If the handshake failed due to the client not speaking | 
|  | // TLS, assume they're speaking plaintext HTTP and write a | 
|  | // 400 response on the TLS conn's underlying net.Conn. | 
|  | if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) { | 
|  | io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n") | 
|  | re.Conn.Close() | 
|  | return | 
|  | } | 
|  | c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) | 
|  | return | 
|  | } | 
|  | c.tlsState = new(tls.ConnectionState) | 
|  | *c.tlsState = tlsConn.ConnectionState() | 
|  | if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { | 
|  | if fn := c.server.TLSNextProto[proto]; fn != nil { | 
|  | h := initNPNRequest{ctx, tlsConn, serverHandler{c.server}} | 
|  | fn(c.server, tlsConn, h) | 
|  | } | 
|  | return | 
|  | } | 
|  | } | 
|  |  | 
|  | // HTTP/1.x from here on. | 
|  |  | 
|  | ctx, cancelCtx := context.WithCancel(ctx) | 
|  | c.cancelCtx = cancelCtx | 
|  | defer cancelCtx() | 
|  |  | 
|  | c.r = &connReader{conn: c} | 
|  | c.bufr = newBufioReader(c.r) | 
|  | c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) | 
|  |  | 
|  | for { | 
|  | w, err := c.readRequest(ctx) | 
|  | if c.r.remain != c.server.initialReadLimitSize() { | 
|  | // If we read any bytes off the wire, we're active. | 
|  | c.setState(c.rwc, StateActive) | 
|  | } | 
|  | if err != nil { | 
|  | const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n" | 
|  |  | 
|  | switch { | 
|  | case err == errTooLarge: | 
|  | // Their HTTP client may or may not be | 
|  | // able to read this if we're | 
|  | // responding to them and hanging up | 
|  | // while they're still writing their | 
|  | // request. Undefined behavior. | 
|  | const publicErr = "431 Request Header Fields Too Large" | 
|  | fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) | 
|  | c.closeWriteAndWait() | 
|  | return | 
|  |  | 
|  | case isUnsupportedTEError(err): | 
|  | // Respond as per RFC 7230 Section 3.3.1 which says, | 
|  | //      A server that receives a request message with a | 
|  | //      transfer coding it does not understand SHOULD | 
//      respond with 501 (Not Implemented).
|  | code := StatusNotImplemented | 
|  |  | 
|  | // We purposefully aren't echoing back the transfer-encoding's value, | 
// so as to mitigate the risk of cross-site scripting by an attacker.
|  | fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders) | 
|  | return | 
|  |  | 
|  | case isCommonNetReadError(err): | 
|  | return // don't reply | 
|  |  | 
|  | default: | 
|  | publicErr := "400 Bad Request" | 
|  | if v, ok := err.(badRequestError); ok { | 
|  | publicErr = publicErr + ": " + string(v) | 
|  | } | 
|  |  | 
|  | fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) | 
|  | return | 
|  | } | 
|  | } | 
|  |  | 
|  | // Expect 100 Continue support | 
|  | req := w.req | 
|  | if req.expectsContinue() { | 
|  | if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { | 
|  | // Wrap the Body reader with one that replies on the connection | 
|  | req.Body = &expectContinueReader{readCloser: req.Body, resp: w} | 
|  | } | 
|  | } else if req.Header.get("Expect") != "" { | 
|  | w.sendExpectationFailed() | 
|  | return | 
|  | } | 
|  |  | 
|  | c.curReq.Store(w) | 
|  |  | 
|  | if requestBodyRemains(req.Body) { | 
|  | registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead) | 
|  | } else { | 
|  | w.conn.r.startBackgroundRead() | 
|  | } | 
|  |  | 
|  | // HTTP cannot have multiple simultaneous active requests.[*] | 
|  | // Until the server replies to this request, it can't read another, | 
|  | // so we might as well run the handler in this goroutine. | 
|  | // [*] Not strictly true: HTTP pipelining. We could let them all process | 
|  | // in parallel even if their responses need to be serialized. | 
|  | // But we're not going to implement HTTP pipelining because it | 
|  | // was never deployed in the wild and the answer is HTTP/2. | 
|  | serverHandler{c.server}.ServeHTTP(w, w.req) | 
|  | w.cancelCtx() | 
|  | if c.hijacked() { | 
|  | return | 
|  | } | 
|  | w.finishRequest() | 
|  | if !w.shouldReuseConnection() { | 
|  | if w.requestBodyLimitHit || w.closedRequestBodyEarly() { | 
|  | c.closeWriteAndWait() | 
|  | } | 
|  | return | 
|  | } | 
|  | c.setState(c.rwc, StateIdle) | 
|  | c.curReq.Store((*response)(nil)) | 
|  |  | 
|  | if !w.conn.server.doKeepAlives() { | 
|  | // We're in shutdown mode. We might've replied | 
|  | // to the user without "Connection: close" and | 
|  | // they might think they can send another | 
|  | // request, but such is life with HTTP/1.1. | 
|  | return | 
|  | } | 
|  |  | 
|  | if d := c.server.idleTimeout(); d != 0 { | 
|  | c.rwc.SetReadDeadline(time.Now().Add(d)) | 
|  | if _, err := c.bufr.Peek(4); err != nil { | 
|  | return | 
|  | } | 
|  | } | 
|  | c.rwc.SetReadDeadline(time.Time{}) | 
|  | } | 
|  | } | 
|  |  | 
|  | func (w *response) sendExpectationFailed() { | 
|  | // TODO(bradfitz): let ServeHTTP handlers handle | 
|  | // requests with non-standard expectation[s]? Seems | 
|  | // theoretical at best, and doesn't fit into the | 
|  | // current ServeHTTP model anyway. We'd need to | 
|  | // make the ResponseWriter an optional | 
|  | // "ExpectReplier" interface or something. | 
|  | // | 
|  | // For now we'll just obey RFC 7231 5.1.1 which says | 
|  | // "A server that receives an Expect field-value other | 
|  | // than 100-continue MAY respond with a 417 (Expectation | 
|  | // Failed) status code to indicate that the unexpected | 
|  | // expectation cannot be met." | 
|  | w.Header().Set("Connection", "close") | 
|  | w.WriteHeader(StatusExpectationFailed) | 
|  | w.finishRequest() | 
|  | } | 
|  |  | 
|  | // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter | 
|  | // and a Hijacker. | 
|  | func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { | 
|  | if w.handlerDone.isSet() { | 
|  | panic("net/http: Hijack called after ServeHTTP finished") | 
|  | } | 
|  | if w.wroteHeader { | 
|  | w.cw.flush() | 
|  | } | 
|  |  | 
|  | c := w.conn | 
|  | c.mu.Lock() | 
|  | defer c.mu.Unlock() | 
|  |  | 
|  | // Release the bufioWriter that writes to the chunk writer, it is not | 
|  | // used after a connection has been hijacked. | 
|  | rwc, buf, err = c.hijackLocked() | 
|  | if err == nil { | 
|  | putBufioWriter(w.w) | 
|  | w.w = nil | 
|  | } | 
|  | return rwc, buf, err | 
|  | } | 
|  |  | 
|  | func (w *response) CloseNotify() <-chan bool { | 
|  | if w.handlerDone.isSet() { | 
|  | panic("net/http: CloseNotify called after ServeHTTP finished") | 
|  | } | 
|  | return w.closeNotifyCh | 
|  | } | 
|  |  | 
|  | func registerOnHitEOF(rc io.ReadCloser, fn func()) { | 
|  | switch v := rc.(type) { | 
|  | case *expectContinueReader: | 
|  | registerOnHitEOF(v.readCloser, fn) | 
|  | case *body: | 
|  | v.registerOnHitEOF(fn) | 
|  | default: | 
|  | panic("unexpected type " + fmt.Sprintf("%T", rc)) | 
|  | } | 
|  | } | 
|  |  | 
|  | // requestBodyRemains reports whether future calls to Read | 
|  | // on rc might yield more data. | 
|  | func requestBodyRemains(rc io.ReadCloser) bool { | 
|  | if rc == NoBody { | 
|  | return false | 
|  | } | 
|  | switch v := rc.(type) { | 
|  | case *expectContinueReader: | 
|  | return requestBodyRemains(v.readCloser) | 
|  | case *body: | 
|  | return v.bodyRemains() | 
|  | default: | 
|  | panic("unexpected type " + fmt.Sprintf("%T", rc)) | 
|  | } | 
|  | } | 
|  |  | 
|  | // The HandlerFunc type is an adapter to allow the use of | 
|  | // ordinary functions as HTTP handlers. If f is a function | 
|  | // with the appropriate signature, HandlerFunc(f) is a | 
|  | // Handler that calls f. | 
|  | type HandlerFunc func(ResponseWriter, *Request) | 
|  |  | 
|  | // ServeHTTP calls f(w, r). | 
|  | func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { | 
|  | f(w, r) | 
|  | } | 
|  |  | 
|  | // Helper handlers | 
|  |  | 
|  | // Error replies to the request with the specified error message and HTTP code. | 
|  | // It does not otherwise end the request; the caller should ensure no further | 
|  | // writes are done to w. | 
|  | // The error message should be plain text. | 
|  | func Error(w ResponseWriter, error string, code int) { | 
|  | w.Header().Set("Content-Type", "text/plain; charset=utf-8") | 
|  | w.Header().Set("X-Content-Type-Options", "nosniff") | 
|  | w.WriteHeader(code) | 
|  | fmt.Fprintln(w, error) | 
|  | } | 
|  |  | 
|  | // NotFound replies to the request with an HTTP 404 not found error. | 
|  | func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } | 
|  |  | 
|  | // NotFoundHandler returns a simple request handler | 
|  | // that replies to each request with a ``404 page not found'' reply. | 
|  | func NotFoundHandler() Handler { return HandlerFunc(NotFound) } | 
|  |  | 
|  | // StripPrefix returns a handler that serves HTTP requests | 
|  | // by removing the given prefix from the request URL's Path | 
|  | // and invoking the handler h. StripPrefix handles a | 
|  | // request for a path that doesn't begin with prefix by | 
|  | // replying with an HTTP 404 not found error. | 
|  | func StripPrefix(prefix string, h Handler) Handler { | 
|  | if prefix == "" { | 
|  | return h | 
|  | } | 
|  | return HandlerFunc(func(w ResponseWriter, r *Request) { | 
|  | if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { | 
|  | r2 := new(Request) | 
|  | *r2 = *r | 
|  | r2.URL = new(url.URL) | 
|  | *r2.URL = *r.URL | 
|  | r2.URL.Path = p | 
|  | h.ServeHTTP(w, r2) | 
|  | } else { | 
|  | NotFound(w, r) | 
|  | } | 
|  | }) | 
|  | } | 
|  |  | 
|  | // Redirect replies to the request with a redirect to url, | 
|  | // which may be a path relative to the request path. | 
|  | // | 
|  | // The provided code should be in the 3xx range and is usually | 
|  | // StatusMovedPermanently, StatusFound or StatusSeeOther. | 
|  | // | 
|  | // If the Content-Type header has not been set, Redirect sets it | 
|  | // to "text/html; charset=utf-8" and writes a small HTML body. | 
|  | // Setting the Content-Type header to any value, including nil, | 
|  | // disables that behavior. | 
|  | func Redirect(w ResponseWriter, r *Request, url string, code int) { | 
|  | // parseURL is just url.Parse (url is shadowed for godoc). | 
|  | if u, err := parseURL(url); err == nil { | 
|  | // If url was relative, make its path absolute by | 
|  | // combining with request path. | 
|  | // The client would probably do this for us, | 
|  | // but doing it ourselves is more reliable. | 
|  | // See RFC 7231, section 7.1.2 | 
|  | if u.Scheme == "" && u.Host == "" { | 
|  | oldpath := r.URL.Path | 
|  | if oldpath == "" { // should not happen, but avoid a crash if it does | 
|  | oldpath = "/" | 
|  | } | 
|  |  | 
|  | // no leading http://server | 
|  | if url == "" || url[0] != '/' { | 
|  | // make relative path absolute | 
|  | olddir, _ := path.Split(oldpath) | 
|  | url = olddir + url | 
|  | } | 
|  |  | 
|  | var query string | 
|  | if i := strings.Index(url, "?"); i != -1 { | 
|  | url, query = url[:i], url[i:] | 
|  | } | 
|  |  | 
|  | // clean up but preserve trailing slash | 
|  | trailing := strings.HasSuffix(url, "/") | 
|  | url = path.Clean(url) | 
|  | if trailing && !strings.HasSuffix(url, "/") { | 
|  | url += "/" | 
|  | } | 
|  | url += query | 
|  | } | 
|  | } | 
|  |  | 
|  | h := w.Header() | 
|  |  | 
|  | // RFC 7231 notes that a short HTML body is usually included in | 
|  | // the response because older user agents may not understand 301/307. | 
|  | // Do it only if the request didn't already have a Content-Type header. | 
|  | _, hadCT := h["Content-Type"] | 
|  |  | 
|  | h.Set("Location", hexEscapeNonASCII(url)) | 
|  | if !hadCT && (r.Method == "GET" || r.Method == "HEAD") { | 
|  | h.Set("Content-Type", "text/html; charset=utf-8") | 
|  | } | 
|  | w.WriteHeader(code) | 
|  |  | 
|  | // Shouldn't send the body for POST or HEAD; that leaves GET. | 
|  | if !hadCT && r.Method == "GET" { | 
|  | body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n" | 
|  | fmt.Fprintln(w, body) | 
|  | } | 
|  | } | 
|  |  | 
|  | // parseURL is just url.Parse. It exists only so that url.Parse can be called | 
|  | // in places where url is shadowed for godoc. See https://golang.org/cl/49930. | 
|  | var parseURL = url.Parse | 
|  |  | 
|  | var htmlReplacer = strings.NewReplacer( | 
|  | "&", "&", | 
|  | "<", "<", | 
|  | ">", ">", | 
|  | // """ is shorter than """. | 
|  | `"`, """, | 
|  | // "'" is shorter than "'" and apos was not in HTML until HTML5. | 
|  | "'", "'", | 
|  | ) | 
|  |  | 
|  | func htmlEscape(s string) string { | 
|  | return htmlReplacer.Replace(s) | 
|  | } | 
|  |  | 
|  | // Redirect to a fixed URL | 
|  | type redirectHandler struct { | 
|  | url  string | 
|  | code int | 
|  | } | 
|  |  | 
|  | func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { | 
|  | Redirect(w, r, rh.url, rh.code) | 
|  | } | 
|  |  | 
|  | // RedirectHandler returns a request handler that redirects | 
|  | // each request it receives to the given url using the given | 
|  | // status code. | 
|  | // | 
|  | // The provided code should be in the 3xx range and is usually | 
|  | // StatusMovedPermanently, StatusFound or StatusSeeOther. | 
|  | func RedirectHandler(url string, code int) Handler { | 
|  | return &redirectHandler{url, code} | 
|  | } | 
|  |  | 
|  | // ServeMux is an HTTP request multiplexer. | 
|  | // It matches the URL of each incoming request against a list of registered | 
|  | // patterns and calls the handler for the pattern that | 
|  | // most closely matches the URL. | 
|  | // | 
|  | // Patterns name fixed, rooted paths, like "/favicon.ico", | 
|  | // or rooted subtrees, like "/images/" (note the trailing slash). | 
|  | // Longer patterns take precedence over shorter ones, so that | 
|  | // if there are handlers registered for both "/images/" | 
|  | // and "/images/thumbnails/", the latter handler will be | 
|  | // called for paths beginning "/images/thumbnails/" and the | 
|  | // former will receive requests for any other paths in the | 
|  | // "/images/" subtree. | 
|  | // | 
|  | // Note that since a pattern ending in a slash names a rooted subtree, | 
|  | // the pattern "/" matches all paths not matched by other registered | 
|  | // patterns, not just the URL with Path == "/". | 
|  | // | 
|  | // If a subtree has been registered and a request is received naming the | 
|  | // subtree root without its trailing slash, ServeMux redirects that | 
|  | // request to the subtree root (adding the trailing slash). This behavior can | 
|  | // be overridden with a separate registration for the path without | 
|  | // the trailing slash. For example, registering "/images/" causes ServeMux | 
|  | // to redirect a request for "/images" to "/images/", unless "/images" has | 
|  | // been registered separately. | 
|  | // | 
|  | // Patterns may optionally begin with a host name, restricting matches to | 
|  | // URLs on that host only. Host-specific patterns take precedence over | 
|  | // general patterns, so that a handler might register for the two patterns | 
|  | // "/codesearch" and "codesearch.google.com/" without also taking over | 
|  | // requests for "http://www.google.com/". | 
|  | // | 
|  | // ServeMux also takes care of sanitizing the URL request path and the Host | 
|  | // header, stripping the port number and redirecting any request containing . or | 
|  | // .. elements or repeated slashes to an equivalent, cleaner URL. | 
|  | type ServeMux struct { | 
|  | mu    sync.RWMutex | 
|  | m     map[string]muxEntry | 
|  | es    []muxEntry // slice of entries sorted from longest to shortest. | 
|  | hosts bool       // whether any patterns contain hostnames | 
|  | } | 
|  |  | 
|  | type muxEntry struct { | 
|  | h       Handler | 
|  | pattern string | 
|  | } | 
|  |  | 
|  | // NewServeMux allocates and returns a new ServeMux. | 
|  | func NewServeMux() *ServeMux { return new(ServeMux) } | 
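
// An illustrative sketch of pattern precedence (imagesHandler and
// thumbnailHandler are hypothetical): "/images/thumbnails/" is longer than
// "/images/", so it wins for paths beneath it, and "/" catches everything
// not matched by another pattern.
//
//	mux := http.NewServeMux()
//	mux.Handle("/images/", imagesHandler)
//	mux.Handle("/images/thumbnails/", thumbnailHandler)
//	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//		http.NotFound(w, r)
//	})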
|  |  | 
|  | // DefaultServeMux is the default ServeMux used by Serve. | 
|  | var DefaultServeMux = &defaultServeMux | 
|  |  | 
|  | var defaultServeMux ServeMux | 
|  |  | 
|  | // cleanPath returns the canonical path for p, eliminating . and .. elements. | 
|  | func cleanPath(p string) string { | 
|  | if p == "" { | 
|  | return "/" | 
|  | } | 
|  | if p[0] != '/' { | 
|  | p = "/" + p | 
|  | } | 
|  | np := path.Clean(p) | 
|  | // path.Clean removes trailing slash except for root; | 
|  | // put the trailing slash back if necessary. | 
|  | if p[len(p)-1] == '/' && np != "/" { | 
|  | // Fast path for common case of p being the string we want: | 
|  | if len(p) == len(np)+1 && strings.HasPrefix(p, np) { | 
|  | np = p | 
|  | } else { | 
|  | np += "/" | 
|  | } | 
|  | } | 
|  | return np | 
|  | } | 
|  |  | 
|  | // stripHostPort returns h without any trailing ":<port>". | 
|  | func stripHostPort(h string) string { | 
|  | // If no port on host, return unchanged | 
|  | if strings.IndexByte(h, ':') == -1 { | 
|  | return h | 
|  | } | 
|  | host, _, err := net.SplitHostPort(h) | 
|  | if err != nil { | 
|  | return h // on error, return unchanged | 
|  | } | 
|  | return host | 
|  | } | 
|  |  | 
|  | // Find a handler on a handler map given a path string. | 
|  | // Most-specific (longest) pattern wins. | 
|  | func (mux *ServeMux) match(path string) (h Handler, pattern string) { | 
|  | // Check for exact match first. | 
|  | v, ok := mux.m[path] | 
|  | if ok { | 
|  | return v.h, v.pattern | 
|  | } | 
|  |  | 
|  | // Check for longest valid match.  mux.es contains all patterns | 
|  | // that end in / sorted from longest to shortest. | 
|  | for _, e := range mux.es { | 
|  | if strings.HasPrefix(path, e.pattern) { | 
|  | return e.h, e.pattern | 
|  | } | 
|  | } | 
|  | return nil, "" | 
|  | } | 
|  |  | 
// redirectToPathSlash reports whether the given path needs a trailing "/"
// appended. That is the case when a handler for path + "/" was registered,
// but not for path itself. If a redirect is needed, it returns a new URL
// with the path set to u.Path + "/" and true; otherwise it returns u and false.
|  | func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) { | 
|  | mux.mu.RLock() | 
|  | shouldRedirect := mux.shouldRedirectRLocked(host, path) | 
|  | mux.mu.RUnlock() | 
|  | if !shouldRedirect { | 
|  | return u, false | 
|  | } | 
|  | path = path + "/" | 
|  | u = &url.URL{Path: path, RawQuery: u.RawQuery} | 
|  | return u, true | 
|  | } | 
|  |  | 
|  | // shouldRedirectRLocked reports whether the given path and host should be redirected to | 
|  | // path+"/". This should happen if a handler is registered for path+"/" but | 
|  | // not path -- see comments at ServeMux. | 
|  | func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool { | 
|  | p := []string{path, host + path} | 
|  |  | 
|  | for _, c := range p { | 
|  | if _, exist := mux.m[c]; exist { | 
|  | return false | 
|  | } | 
|  | } | 
|  |  | 
|  | n := len(path) | 
|  | if n == 0 { | 
|  | return false | 
|  | } | 
|  | for _, c := range p { | 
|  | if _, exist := mux.m[c+"/"]; exist { | 
|  | return path[n-1] != '/' | 
|  | } | 
|  | } | 
|  |  | 
|  | return false | 
|  | } | 
|  |  | 
|  | // Handler returns the handler to use for the given request, | 
|  | // consulting r.Method, r.Host, and r.URL.Path. It always returns | 
|  | // a non-nil handler. If the path is not in its canonical form, the | 
|  | // handler will be an internally-generated handler that redirects | 
|  | // to the canonical path. If the host contains a port, it is ignored | 
|  | // when matching handlers. | 
|  | // | 
|  | // The path and host are used unchanged for CONNECT requests. | 
|  | // | 
|  | // Handler also returns the registered pattern that matches the | 
|  | // request or, in the case of internally-generated redirects, | 
|  | // the pattern that will match after following the redirect. | 
|  | // | 
|  | // If there is no registered handler that applies to the request, | 
|  | // Handler returns a ``page not found'' handler and an empty pattern. | 
|  | func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { | 
|  |  | 
|  | // CONNECT requests are not canonicalized. | 
|  | if r.Method == "CONNECT" { | 
|  | // If r.URL.Path is /tree and its handler is not registered, | 
|  | // the /tree -> /tree/ redirect applies to CONNECT requests | 
|  | // but the path canonicalization does not. | 
|  | if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok { | 
|  | return RedirectHandler(u.String(), StatusMovedPermanently), u.Path | 
|  | } | 
|  |  | 
|  | return mux.handler(r.Host, r.URL.Path) | 
|  | } | 
|  |  | 
|  | // All other requests have any port stripped and path cleaned | 
|  | // before passing to mux.handler. | 
|  | host := stripHostPort(r.Host) | 
|  | path := cleanPath(r.URL.Path) | 
|  |  | 
|  | // If the given path is /tree and its handler is not registered, | 
|  | // redirect for /tree/. | 
|  | if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok { | 
|  | return RedirectHandler(u.String(), StatusMovedPermanently), u.Path | 
|  | } | 
|  |  | 
|  | if path != r.URL.Path { | 
|  | _, pattern = mux.handler(host, path) | 
|  | url := *r.URL | 
|  | url.Path = path | 
|  | return RedirectHandler(url.String(), StatusMovedPermanently), pattern | 
|  | } | 
|  |  | 
|  | return mux.handler(host, r.URL.Path) | 
|  | } | 
|  |  | 
|  | // handler is the main implementation of Handler. | 
|  | // The path is known to be in canonical form, except for CONNECT methods. | 
|  | func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { | 
|  | mux.mu.RLock() | 
|  | defer mux.mu.RUnlock() | 
|  |  | 
|  | // Host-specific pattern takes precedence over generic ones | 
|  | if mux.hosts { | 
|  | h, pattern = mux.match(host + path) | 
|  | } | 
|  | if h == nil { | 
|  | h, pattern = mux.match(path) | 
|  | } | 
|  | if h == nil { | 
|  | h, pattern = NotFoundHandler(), "" | 
|  | } | 
|  | return | 
|  | } | 
|  |  | 
|  | // ServeHTTP dispatches the request to the handler whose | 
|  | // pattern most closely matches the request URL. | 
|  | func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { | 
|  | if r.RequestURI == "*" { | 
|  | if r.ProtoAtLeast(1, 1) { | 
|  | w.Header().Set("Connection", "close") | 
|  | } | 
|  | w.WriteHeader(StatusBadRequest) | 
|  | return | 
|  | } | 
|  | h, _ := mux.Handler(r) | 
|  | h.ServeHTTP(w, r) | 
|  | } | 
|  |  | 
|  | // Handle registers the handler for the given pattern. | 
|  | // If a handler already exists for pattern, Handle panics. | 
|  | func (mux *ServeMux) Handle(pattern string, handler Handler) { | 
|  | mux.mu.Lock() | 
|  | defer mux.mu.Unlock() | 
|  |  | 
|  | if pattern == "" { | 
|  | panic("http: invalid pattern") | 
|  | } | 
|  | if handler == nil { | 
|  | panic("http: nil handler") | 
|  | } | 
|  | if _, exist := mux.m[pattern]; exist { | 
|  | panic("http: multiple registrations for " + pattern) | 
|  | } | 
|  |  | 
|  | if mux.m == nil { | 
|  | mux.m = make(map[string]muxEntry) | 
|  | } | 
|  | e := muxEntry{h: handler, pattern: pattern} | 
|  | mux.m[pattern] = e | 
|  | if pattern[len(pattern)-1] == '/' { | 
|  | mux.es = appendSorted(mux.es, e) | 
|  | } | 
|  |  | 
|  | if pattern[0] != '/' { | 
|  | mux.hosts = true | 
|  | } | 
|  | } | 
|  |  | 
|  | func appendSorted(es []muxEntry, e muxEntry) []muxEntry { | 
|  | n := len(es) | 
|  | i := sort.Search(n, func(i int) bool { | 
|  | return len(es[i].pattern) < len(e.pattern) | 
|  | }) | 
|  | if i == n { | 
|  | return append(es, e) | 
|  | } | 
|  | // we now know that i points at where we want to insert | 
|  | es = append(es, muxEntry{}) // try to grow the slice in place, any entry works. | 
|  | copy(es[i+1:], es[i:])      // Move shorter entries down | 
|  | es[i] = e | 
|  | return es | 
|  | } | 
|  |  | 
|  | // HandleFunc registers the handler function for the given pattern. | 
|  | func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { | 
|  | if handler == nil { | 
|  | panic("http: nil handler") | 
|  | } | 
|  | mux.Handle(pattern, HandlerFunc(handler)) | 
|  | } | 
|  |  | 
|  | // Handle registers the handler for the given pattern | 
|  | // in the DefaultServeMux. | 
|  | // The documentation for ServeMux explains how patterns are matched. | 
|  | func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } | 
|  |  | 
|  | // HandleFunc registers the handler function for the given pattern | 
|  | // in the DefaultServeMux. | 
|  | // The documentation for ServeMux explains how patterns are matched. | 
|  | func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { | 
|  | DefaultServeMux.HandleFunc(pattern, handler) | 
|  | } | 
|  |  | 
|  | // Serve accepts incoming HTTP connections on the listener l, | 
|  | // creating a new service goroutine for each. The service goroutines | 
|  | // read requests and then call handler to reply to them. | 
|  | // | 
|  | // The handler is typically nil, in which case the DefaultServeMux is used. | 
|  | // | 
|  | // HTTP/2 support is only enabled if the Listener returns *tls.Conn | 
|  | // connections and they were configured with "h2" in the TLS | 
|  | // Config.NextProtos. | 
|  | // | 
|  | // Serve always returns a non-nil error. | 
|  | func Serve(l net.Listener, handler Handler) error { | 
|  | srv := &Server{Handler: handler} | 
|  | return srv.Serve(l) | 
|  | } | 
|  |  | 
|  | // ServeTLS accepts incoming HTTPS connections on the listener l, | 
|  | // creating a new service goroutine for each. The service goroutines | 
|  | // read requests and then call handler to reply to them. | 
|  | // | 
|  | // The handler is typically nil, in which case the DefaultServeMux is used. | 
|  | // | 
|  | // Additionally, files containing a certificate and matching private key | 
|  | // for the server must be provided. If the certificate is signed by a | 
|  | // certificate authority, the certFile should be the concatenation | 
|  | // of the server's certificate, any intermediates, and the CA's certificate. | 
|  | // | 
|  | // ServeTLS always returns a non-nil error. | 
|  | func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error { | 
|  | srv := &Server{Handler: handler} | 
|  | return srv.ServeTLS(l, certFile, keyFile) | 
|  | } | 
|  |  | 
|  | // A Server defines parameters for running an HTTP server. | 
|  | // The zero value for Server is a valid configuration. | 
|  | type Server struct { | 
|  | Addr    string  // TCP address to listen on, ":http" if empty | 
|  | Handler Handler // handler to invoke, http.DefaultServeMux if nil | 
|  |  | 
|  | // TLSConfig optionally provides a TLS configuration for use | 
|  | // by ServeTLS and ListenAndServeTLS. Note that this value is | 
|  | // cloned by ServeTLS and ListenAndServeTLS, so it's not | 
|  | // possible to modify the configuration with methods like | 
|  | // tls.Config.SetSessionTicketKeys. To use | 
|  | // SetSessionTicketKeys, use Server.Serve with a TLS Listener | 
|  | // instead. | 
|  | TLSConfig *tls.Config | 
|  |  | 
|  | // ReadTimeout is the maximum duration for reading the entire | 
|  | // request, including the body. | 
|  | // | 
|  | // Because ReadTimeout does not let Handlers make per-request | 
|  | // decisions on each request body's acceptable deadline or | 
|  | // upload rate, most users will prefer to use | 
|  | // ReadHeaderTimeout. It is valid to use them both. | 
|  | ReadTimeout time.Duration | 
|  |  | 
|  | // ReadHeaderTimeout is the amount of time allowed to read | 
|  | // request headers. The connection's read deadline is reset | 
|  | // after reading the headers and the Handler can decide what | 
|  | // is considered too slow for the body. If ReadHeaderTimeout | 
|  | // is zero, the value of ReadTimeout is used. If both are | 
|  | // zero, there is no timeout. | 
|  | ReadHeaderTimeout time.Duration | 
|  |  | 
|  | // WriteTimeout is the maximum duration before timing out | 
|  | // writes of the response. It is reset whenever a new | 
|  | // request's header is read. Like ReadTimeout, it does not | 
|  | // let Handlers make decisions on a per-request basis. | 
|  | WriteTimeout time.Duration | 
|  |  | 
|  | // IdleTimeout is the maximum amount of time to wait for the | 
|  | // next request when keep-alives are enabled. If IdleTimeout | 
|  | // is zero, the value of ReadTimeout is used. If both are | 
|  | // zero, there is no timeout. | 
|  | IdleTimeout time.Duration | 
|  |  | 
|  | // MaxHeaderBytes controls the maximum number of bytes the | 
|  | // server will read parsing the request header's keys and | 
|  | // values, including the request line. It does not limit the | 
|  | // size of the request body. | 
|  | // If zero, DefaultMaxHeaderBytes is used. | 
|  | MaxHeaderBytes int | 
|  |  | 
|  | // TLSNextProto optionally specifies a function to take over | 
|  | // ownership of the provided TLS connection when an NPN/ALPN | 
|  | // protocol upgrade has occurred. The map key is the protocol | 
|  | // name negotiated. The Handler argument should be used to | 
|  | // handle HTTP requests and will initialize the Request's TLS | 
|  | // and RemoteAddr if not already set. The connection is | 
|  | // automatically closed when the function returns. | 
|  | // If TLSNextProto is not nil, HTTP/2 support is not enabled | 
|  | // automatically. | 
|  | TLSNextProto map[string]func(*Server, *tls.Conn, Handler) | 
|  |  | 
|  | // ConnState specifies an optional callback function that is | 
|  | // called when a client connection changes state. See the | 
|  | // ConnState type and associated constants for details. | 
|  | ConnState func(net.Conn, ConnState) | 
|  |  | 
|  | // ErrorLog specifies an optional logger for errors accepting | 
|  | // connections, unexpected behavior from handlers, and | 
|  | // underlying FileSystem errors. | 
|  | // If nil, logging is done via the log package's standard logger. | 
|  | ErrorLog *log.Logger | 
|  |  | 
|  | // BaseContext optionally specifies a function that returns | 
|  | // the base context for incoming requests on this server. | 
|  | // The provided Listener is the specific Listener that's | 
|  | // about to start accepting requests. | 
|  | // If BaseContext is nil, the default is context.Background(). | 
|  | // If non-nil, it must return a non-nil context. | 
|  | BaseContext func(net.Listener) context.Context | 
|  |  | 
|  | // ConnContext optionally specifies a function that modifies | 
|  | // the context used for a new connection c. The provided ctx | 
|  | // is derived from the base context and has a ServerContextKey | 
|  | // value. | 
|  | ConnContext func(ctx context.Context, c net.Conn) context.Context | 
|  |  | 
|  | disableKeepAlives int32     // accessed atomically. | 
|  | inShutdown        int32     // accessed atomically (non-zero means we're in Shutdown) | 
|  | nextProtoOnce     sync.Once // guards setupHTTP2_* init | 
|  | nextProtoErr      error     // result of http2.ConfigureServer if used | 
|  |  | 
|  | mu         sync.Mutex | 
|  | listeners  map[*net.Listener]struct{} | 
|  | activeConn map[*conn]struct{} | 
|  | doneChan   chan struct{} | 
|  | onShutdown []func() | 
|  | } | 
|  |  | 
|  | func (s *Server) getDoneChan() <-chan struct{} { | 
|  | s.mu.Lock() | 
|  | defer s.mu.Unlock() | 
|  | return s.getDoneChanLocked() | 
|  | } | 
|  |  | 
|  | func (s *Server) getDoneChanLocked() chan struct{} { | 
|  | if s.doneChan == nil { | 
|  | s.doneChan = make(chan struct{}) | 
|  | } | 
|  | return s.doneChan | 
|  | } | 
|  |  | 
|  | func (s *Server) closeDoneChanLocked() { | 
|  | ch := s.getDoneChanLocked() | 
|  | select { | 
|  | case <-ch: | 
|  | // Already closed. Don't close again. | 
|  | default: | 
|  | // Safe to close here. We're the only closer, guarded | 
|  | // by s.mu. | 
|  | close(ch) | 
|  | } | 
|  | } | 
|  |  | 
|  | // Close immediately closes all active net.Listeners and any | 
|  | // connections in state StateNew, StateActive, or StateIdle. For a | 
|  | // graceful shutdown, use Shutdown. | 
|  | // | 
|  | // Close does not attempt to close (and does not even know about) | 
|  | // any hijacked connections, such as WebSockets. | 
|  | // | 
|  | // Close returns any error returned from closing the Server's | 
|  | // underlying Listener(s). | 
|  | func (srv *Server) Close() error { | 
|  | atomic.StoreInt32(&srv.inShutdown, 1) | 
|  | srv.mu.Lock() | 
|  | defer srv.mu.Unlock() | 
|  | srv.closeDoneChanLocked() | 
|  | err := srv.closeListenersLocked() | 
|  | for c := range srv.activeConn { | 
|  | c.rwc.Close() | 
|  | delete(srv.activeConn, c) | 
|  | } | 
|  | return err | 
|  | } | 
|  |  | 
|  | // shutdownPollInterval is how often we poll for quiescence | 
|  | // during Server.Shutdown. This is lower during tests, to | 
|  | // speed up tests. | 
|  | // Ideally we could find a solution that doesn't involve polling, | 
|  | // but which also doesn't have a high runtime cost (and doesn't | 
|  | // involve any contentious mutexes), but that is left as an | 
|  | // exercise for the reader. | 
|  | var shutdownPollInterval = 500 * time.Millisecond | 
|  |  | 
|  | // Shutdown gracefully shuts down the server without interrupting any | 
|  | // active connections. Shutdown works by first closing all open | 
|  | // listeners, then closing all idle connections, and then waiting | 
|  | // indefinitely for connections to return to idle and then shut down. | 
|  | // If the provided context expires before the shutdown is complete, | 
|  | // Shutdown returns the context's error, otherwise it returns any | 
|  | // error returned from closing the Server's underlying Listener(s). | 
|  | // | 
|  | // When Shutdown is called, Serve, ListenAndServe, and | 
|  | // ListenAndServeTLS immediately return ErrServerClosed. Make sure the | 
|  | // program doesn't exit and waits instead for Shutdown to return. | 
|  | // | 
|  | // Shutdown does not attempt to close nor wait for hijacked | 
|  | // connections such as WebSockets. The caller of Shutdown should | 
|  | // separately notify such long-lived connections of shutdown and wait | 
|  | // for them to close, if desired. See RegisterOnShutdown for a way to | 
|  | // register shutdown notification functions. | 
|  | // | 
|  | // Once Shutdown has been called on a server, it may not be reused; | 
|  | // future calls to methods such as Serve will return ErrServerClosed. | 
|  | func (srv *Server) Shutdown(ctx context.Context) error { | 
|  | atomic.StoreInt32(&srv.inShutdown, 1) | 
|  |  | 
|  | srv.mu.Lock() | 
|  | lnerr := srv.closeListenersLocked() | 
|  | srv.closeDoneChanLocked() | 
|  | for _, f := range srv.onShutdown { | 
|  | go f() | 
|  | } | 
|  | srv.mu.Unlock() | 
|  |  | 
|  | ticker := time.NewTicker(shutdownPollInterval) | 
|  | defer ticker.Stop() | 
|  | for { | 
|  | if srv.closeIdleConns() { | 
|  | return lnerr | 
|  | } | 
|  | select { | 
|  | case <-ctx.Done(): | 
|  | return ctx.Err() | 
|  | case <-ticker.C: | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | // RegisterOnShutdown registers a function to call on Shutdown. | 
// This can be used to gracefully shut down connections that have
// undergone NPN/ALPN protocol upgrade or that have been hijacked.
|  | // This function should start protocol-specific graceful shutdown, | 
|  | // but should not wait for shutdown to complete. | 
|  | func (srv *Server) RegisterOnShutdown(f func()) { | 
|  | srv.mu.Lock() | 
|  | srv.onShutdown = append(srv.onShutdown, f) | 
|  | srv.mu.Unlock() | 
|  | } | 
|  |  | 
|  | // closeIdleConns closes all idle connections and reports whether the | 
|  | // server is quiescent. | 
|  | func (s *Server) closeIdleConns() bool { | 
|  | s.mu.Lock() | 
|  | defer s.mu.Unlock() | 
|  | quiescent := true | 
|  | for c := range s.activeConn { | 
|  | st, unixSec := c.getState() | 
|  | // Issue 22682: treat StateNew connections as if | 
|  | // they're idle if we haven't read the first request's | 
|  | // header in over 5 seconds. | 
|  | if st == StateNew && unixSec < time.Now().Unix()-5 { | 
|  | st = StateIdle | 
|  | } | 
|  | if st != StateIdle || unixSec == 0 { | 
|  | // Assume unixSec == 0 means it's a very new | 
|  | // connection, without state set yet. | 
|  | quiescent = false | 
|  | continue | 
|  | } | 
|  | c.rwc.Close() | 
|  | delete(s.activeConn, c) | 
|  | } | 
|  | return quiescent | 
|  | } | 
|  |  | 
|  | func (s *Server) closeListenersLocked() error { | 
|  | var err error | 
|  | for ln := range s.listeners { | 
|  | if cerr := (*ln).Close(); cerr != nil && err == nil { | 
|  | err = cerr | 
|  | } | 
|  | delete(s.listeners, ln) | 
|  | } | 
|  | return err | 
|  | } | 
|  |  | 
|  | // A ConnState represents the state of a client connection to a server. | 
|  | // It's used by the optional Server.ConnState hook. | 
|  | type ConnState int | 
|  |  | 
|  | const ( | 
|  | // StateNew represents a new connection that is expected to | 
|  | // send a request immediately. Connections begin at this | 
|  | // state and then transition to either StateActive or | 
|  | // StateClosed. | 
|  | StateNew ConnState = iota | 
|  |  | 
|  | // StateActive represents a connection that has read 1 or more | 
|  | // bytes of a request. The Server.ConnState hook for | 
|  | // StateActive fires before the request has entered a handler | 
|  | // and doesn't fire again until the request has been | 
|  | // handled. After the request is handled, the state | 
|  | // transitions to StateClosed, StateHijacked, or StateIdle. | 
|  | // For HTTP/2, StateActive fires on the transition from zero | 
|  | // to one active request, and only transitions away once all | 
|  | // active requests are complete. That means that ConnState | 
|  | // cannot be used to do per-request work; ConnState only notes | 
|  | // the overall state of the connection. | 
|  | StateActive | 
|  |  | 
|  | // StateIdle represents a connection that has finished | 
|  | // handling a request and is in the keep-alive state, waiting | 
|  | // for a new request. Connections transition from StateIdle | 
|  | // to either StateActive or StateClosed. | 
|  | StateIdle | 
|  |  | 
|  | // StateHijacked represents a hijacked connection. | 
|  | // This is a terminal state. It does not transition to StateClosed. | 
|  | StateHijacked | 
|  |  | 
|  | // StateClosed represents a closed connection. | 
|  | // This is a terminal state. Hijacked connections do not | 
|  | // transition to StateClosed. | 
|  | StateClosed | 
|  | ) | 
|  |  | 
|  | var stateName = map[ConnState]string{ | 
|  | StateNew:      "new", | 
|  | StateActive:   "active", | 
|  | StateIdle:     "idle", | 
|  | StateHijacked: "hijacked", | 
|  | StateClosed:   "closed", | 
|  | } | 
|  |  | 
|  | func (c ConnState) String() string { | 
|  | return stateName[c] | 
|  | } | 
|  |  | 
|  | // serverHandler delegates to either the server's Handler or | 
|  | // DefaultServeMux and also handles "OPTIONS *" requests. | 
|  | type serverHandler struct { | 
|  | srv *Server | 
|  | } | 
|  |  | 
|  | func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { | 
|  | handler := sh.srv.Handler | 
|  | if handler == nil { | 
|  | handler = DefaultServeMux | 
|  | } | 
|  | if req.RequestURI == "*" && req.Method == "OPTIONS" { | 
|  | handler = globalOptionsHandler{} | 
|  | } | 
|  | handler.ServeHTTP(rw, req) | 
|  | } | 
|  |  | 
|  | // ListenAndServe listens on the TCP network address srv.Addr and then | 
|  | // calls Serve to handle requests on incoming connections. | 
|  | // Accepted connections are configured to enable TCP keep-alives. | 
|  | // | 
|  | // If srv.Addr is blank, ":http" is used. | 
|  | // | 
|  | // ListenAndServe always returns a non-nil error. After Shutdown or Close, | 
|  | // the returned error is ErrServerClosed. | 
|  | func (srv *Server) ListenAndServe() error { | 
|  | if srv.shuttingDown() { | 
|  | return ErrServerClosed | 
|  | } | 
|  | addr := srv.Addr | 
|  | if addr == "" { | 
|  | addr = ":http" | 
|  | } | 
|  | ln, err := net.Listen("tcp", addr) | 
|  | if err != nil { | 
|  | return err | 
|  | } | 
|  | return srv.Serve(ln) | 
|  | } | 
|  |  | 
|  | var testHookServerServe func(*Server, net.Listener) // used if non-nil | 
|  |  | 
// shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
// automatic HTTP/2 (which sets up the srv.TLSNextProto map).
|  | func (srv *Server) shouldConfigureHTTP2ForServe() bool { | 
|  | if srv.TLSConfig == nil { | 
|  | // Compatibility with Go 1.6: | 
|  | // If there's no TLSConfig, it's possible that the user just | 
|  | // didn't set it on the http.Server, but did pass it to | 
|  | // tls.NewListener and passed that listener to Serve. | 
|  | // So we should configure HTTP/2 (to set up srv.TLSNextProto) | 
|  | // in case the listener returns an "h2" *tls.Conn. | 
|  | return true | 
|  | } | 
|  | // The user specified a TLSConfig on their http.Server. | 
// In this case, only configure HTTP/2 if their tls.Config
|  | // explicitly mentions "h2". Otherwise http2.ConfigureServer | 
|  | // would modify the tls.Config to add it, but they probably already | 
|  | // passed this tls.Config to tls.NewListener. And if they did, | 
|  | // it's too late anyway to fix it. It would only be potentially racy. | 
|  | // See Issue 15908. | 
|  | return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) | 
|  | } | 
|  |  | 
|  | // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe, | 
|  | // and ListenAndServeTLS methods after a call to Shutdown or Close. | 
|  | var ErrServerClosed = errors.New("http: Server closed") | 
|  |  | 
|  | // Serve accepts incoming connections on the Listener l, creating a | 
|  | // new service goroutine for each. The service goroutines read requests and | 
|  | // then call srv.Handler to reply to them. | 
|  | // | 
|  | // HTTP/2 support is only enabled if the Listener returns *tls.Conn | 
|  | // connections and they were configured with "h2" in the TLS | 
|  | // Config.NextProtos. | 
|  | // | 
|  | // Serve always returns a non-nil error and closes l. | 
|  | // After Shutdown or Close, the returned error is ErrServerClosed. | 
|  | func (srv *Server) Serve(l net.Listener) error { | 
|  | if fn := testHookServerServe; fn != nil { | 
|  | fn(srv, l) // call hook with unwrapped listener | 
|  | } | 
|  |  | 
|  | origListener := l | 
|  | l = &onceCloseListener{Listener: l} | 
|  | defer l.Close() | 
|  |  | 
|  | if err := srv.setupHTTP2_Serve(); err != nil { | 
|  | return err | 
|  | } | 
|  |  | 
|  | if !srv.trackListener(&l, true) { | 
|  | return ErrServerClosed | 
|  | } | 
|  | defer srv.trackListener(&l, false) | 
|  |  | 
|  | var tempDelay time.Duration // how long to sleep on accept failure | 
|  |  | 
|  | baseCtx := context.Background() | 
|  | if srv.BaseContext != nil { | 
|  | baseCtx = srv.BaseContext(origListener) | 
|  | if baseCtx == nil { | 
|  | panic("BaseContext returned a nil context") | 
|  | } | 
|  | } | 
|  |  | 
|  | ctx := context.WithValue(baseCtx, ServerContextKey, srv) | 
|  | for { | 
|  | rw, e := l.Accept() | 
|  | if e != nil { | 
|  | select { | 
|  | case <-srv.getDoneChan(): | 
|  | return ErrServerClosed | 
|  | default: | 
|  | } | 
|  | if ne, ok := e.(net.Error); ok && ne.Temporary() { | 
|  | if tempDelay == 0 { | 
|  | tempDelay = 5 * time.Millisecond | 
|  | } else { | 
|  | tempDelay *= 2 | 
|  | } | 
|  | if max := 1 * time.Second; tempDelay > max { | 
|  | tempDelay = max | 
|  | } | 
|  | srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) | 
|  | time.Sleep(tempDelay) | 
|  | continue | 
|  | } | 
|  | return e | 
|  | } | 
|  | if cc := srv.ConnContext; cc != nil { | 
|  | ctx = cc(ctx, rw) | 
|  | if ctx == nil { | 
|  | panic("ConnContext returned nil") | 
|  | } | 
|  | } | 
|  | tempDelay = 0 | 
|  | c := srv.newConn(rw) | 
|  | c.setState(c.rwc, StateNew) // before Serve can return | 
|  | go c.serve(ctx) | 
|  | } | 
|  | } | 
|  |  | 
|  | // ServeTLS accepts incoming connections on the Listener l, creating a | 
|  | // new service goroutine for each. The service goroutines perform TLS | 
|  | // setup and then read requests, calling srv.Handler to reply to them. | 
|  | // | 
|  | // Files containing a certificate and matching private key for the | 
|  | // server must be provided if neither the Server's | 
|  | // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated. | 
|  | // If the certificate is signed by a certificate authority, the | 
|  | // certFile should be the concatenation of the server's certificate, | 
|  | // any intermediates, and the CA's certificate. | 
|  | // | 
|  | // ServeTLS always returns a non-nil error. After Shutdown or Close, the | 
|  | // returned error is ErrServerClosed. | 
|  | func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error { | 
|  | // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig | 
|  | // before we clone it and create the TLS Listener. | 
|  | if err := srv.setupHTTP2_ServeTLS(); err != nil { | 
|  | return err | 
|  | } | 
|  |  | 
|  | config := cloneTLSConfig(srv.TLSConfig) | 
|  | if !strSliceContains(config.NextProtos, "http/1.1") { | 
|  | config.NextProtos = append(config.NextProtos, "http/1.1") | 
|  | } | 
|  |  | 
|  | configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil | 
|  | if !configHasCert || certFile != "" || keyFile != "" { | 
|  | var err error | 
|  | config.Certificates = make([]tls.Certificate, 1) | 
|  | config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) | 
|  | if err != nil { | 
|  | return err | 
|  | } | 
|  | } | 
|  |  | 
|  | tlsListener := tls.NewListener(l, config) | 
|  | return srv.Serve(tlsListener) | 
|  | } | 
|  |  | 
// trackListener adds a net.Listener to, or removes one from, the set
// of tracked listeners.
//
// We store a pointer to the net.Listener interface value in the map
// set, in case the net.Listener itself is not comparable. This is
// safe because we only call trackListener via Serve, which tracks and
// later (via defer) untracks the same pointer to a local variable
// there. We never need to compare a Listener from another caller.
|  | // | 
|  | // It reports whether the server is still up (not Shutdown or Closed). | 
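//
// For illustration only (not part of the server code): interface-typed
// map keys panic at runtime when the dynamic type is not comparable,
// which is why a pointer is used as the key instead:
//
//	var k interface{} = []int{1} // slices are not comparable
//	m := map[interface{}]bool{}
//	m[k] = true // panics: hash of unhashable type []int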
|  | func (s *Server) trackListener(ln *net.Listener, add bool) bool { | 
|  | s.mu.Lock() | 
|  | defer s.mu.Unlock() | 
|  | if s.listeners == nil { | 
|  | s.listeners = make(map[*net.Listener]struct{}) | 
|  | } | 
|  | if add { | 
|  | if s.shuttingDown() { | 
|  | return false | 
|  | } | 
|  | s.listeners[ln] = struct{}{} | 
|  | } else { | 
|  | delete(s.listeners, ln) | 
|  | } | 
|  | return true | 
|  | } | 
|  |  | 
|  | func (s *Server) trackConn(c *conn, add bool) { | 
|  | s.mu.Lock() | 
|  | defer s.mu.Unlock() | 
|  | if s.activeConn == nil { | 
|  | s.activeConn = make(map[*conn]struct{}) | 
|  | } | 
|  | if add { | 
|  | s.activeConn[c] = struct{}{} | 
|  | } else { | 
|  | delete(s.activeConn, c) | 
|  | } | 
|  | } | 
|  |  | 
|  | func (s *Server) idleTimeout() time.Duration { | 
|  | if s.IdleTimeout != 0 { | 
|  | return s.IdleTimeout | 
|  | } | 
|  | return s.ReadTimeout | 
|  | } | 
|  |  | 
|  | func (s *Server) readHeaderTimeout() time.Duration { | 
|  | if s.ReadHeaderTimeout != 0 { | 
|  | return s.ReadHeaderTimeout | 
|  | } | 
|  | return s.ReadTimeout | 
|  | } | 
|  |  | 
|  | func (s *Server) doKeepAlives() bool { | 
|  | return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown() | 
|  | } | 
|  |  | 
|  | func (s *Server) shuttingDown() bool { | 
|  | // TODO: replace inShutdown with the existing atomicBool type; | 
|  | // see https://github.com/golang/go/issues/20239#issuecomment-381434582 | 
|  | return atomic.LoadInt32(&s.inShutdown) != 0 | 
|  | } | 
|  |  | 
|  | // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. | 
|  | // By default, keep-alives are always enabled. Only very | 
|  | // resource-constrained environments or servers in the process of | 
|  | // shutting down should disable them. | 
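//
// A sketch of typical use during graceful shutdown (ctx is a
// caller-supplied placeholder):
//
//	srv.SetKeepAlivesEnabled(false) // stop reusing idle connections
//	if err := srv.Shutdown(ctx); err != nil {
//		log.Printf("shutdown: %v", err)
//	}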
|  | func (srv *Server) SetKeepAlivesEnabled(v bool) { | 
|  | if v { | 
|  | atomic.StoreInt32(&srv.disableKeepAlives, 0) | 
|  | return | 
|  | } | 
|  | atomic.StoreInt32(&srv.disableKeepAlives, 1) | 
|  |  | 
|  | // Close idle HTTP/1 conns: | 
|  | srv.closeIdleConns() | 
|  |  | 
|  | // TODO: Issue 26303: close HTTP/2 conns as soon as they become idle. | 
|  | } | 
|  |  | 
|  | func (s *Server) logf(format string, args ...interface{}) { | 
|  | if s.ErrorLog != nil { | 
|  | s.ErrorLog.Printf(format, args...) | 
|  | } else { | 
|  | log.Printf(format, args...) | 
|  | } | 
|  | } | 
|  |  | 
|  | // logf prints to the ErrorLog of the *Server associated with request r | 
|  | // via ServerContextKey. If there's no associated server, or if ErrorLog | 
|  | // is nil, logging is done via the log package's standard logger. | 
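//
// Handlers can look up the owning *Server the same way via
// ServerContextKey (a sketch; the handler itself is illustrative):
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		srv, _ := r.Context().Value(http.ServerContextKey).(*http.Server)
//		if srv != nil && srv.ErrorLog != nil {
//			srv.ErrorLog.Printf("handling %s", r.URL.Path)
//		}
//	}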
|  | func logf(r *Request, format string, args ...interface{}) { | 
|  | s, _ := r.Context().Value(ServerContextKey).(*Server) | 
|  | if s != nil && s.ErrorLog != nil { | 
|  | s.ErrorLog.Printf(format, args...) | 
|  | } else { | 
|  | log.Printf(format, args...) | 
|  | } | 
|  | } | 
|  |  | 
|  | // ListenAndServe listens on the TCP network address addr and then calls | 
|  | // Serve with handler to handle requests on incoming connections. | 
|  | // Accepted connections are configured to enable TCP keep-alives. | 
|  | // | 
|  | // The handler is typically nil, in which case the DefaultServeMux is used. | 
|  | // | 
|  | // ListenAndServe always returns a non-nil error. | 
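//
// A trivial example server (illustrative):
//
//	package main
//
//	import (
//		"io"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
//			io.WriteString(w, "Hello, world!\n")
//		})
//		log.Fatal(http.ListenAndServe(":8080", nil))
//	}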
|  | func ListenAndServe(addr string, handler Handler) error { | 
|  | server := &Server{Addr: addr, Handler: handler} | 
|  | return server.ListenAndServe() | 
|  | } | 
|  |  | 
|  | // ListenAndServeTLS acts identically to ListenAndServe, except that it | 
|  | // expects HTTPS connections. Additionally, files containing a certificate and | 
|  | // matching private key for the server must be provided. If the certificate | 
|  | // is signed by a certificate authority, the certFile should be the concatenation | 
|  | // of the server's certificate, any intermediates, and the CA's certificate. | 
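//
// A usage sketch (cert.pem and key.pem are placeholder file names):
//
//	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))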
|  | func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { | 
|  | server := &Server{Addr: addr, Handler: handler} | 
|  | return server.ListenAndServeTLS(certFile, keyFile) | 
|  | } | 
|  |  | 
|  | // ListenAndServeTLS listens on the TCP network address srv.Addr and | 
|  | // then calls ServeTLS to handle requests on incoming TLS connections. | 
|  | // Accepted connections are configured to enable TCP keep-alives. | 
|  | // | 
|  | // Filenames containing a certificate and matching private key for the | 
|  | // server must be provided if neither the Server's TLSConfig.Certificates | 
|  | // nor TLSConfig.GetCertificate are populated. If the certificate is | 
|  | // signed by a certificate authority, the certFile should be the | 
|  | // concatenation of the server's certificate, any intermediates, and | 
|  | // the CA's certificate. | 
|  | // | 
|  | // If srv.Addr is blank, ":https" is used. | 
|  | // | 
|  | // ListenAndServeTLS always returns a non-nil error. After Shutdown or | 
|  | // Close, the returned error is ErrServerClosed. | 
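//
// A sketch of typical use with a configured Server (mux and the file
// names are placeholders):
//
//	srv := &http.Server{
//		Addr:    ":8443",
//		Handler: mux,
//	}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))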
|  | func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { | 
|  | if srv.shuttingDown() { | 
|  | return ErrServerClosed | 
|  | } | 
|  | addr := srv.Addr | 
|  | if addr == "" { | 
|  | addr = ":https" | 
|  | } | 
|  |  | 
|  | ln, err := net.Listen("tcp", addr) | 
|  | if err != nil { | 
|  | return err | 
|  | } | 
|  |  | 
|  | defer ln.Close() | 
|  |  | 
|  | return srv.ServeTLS(ln, certFile, keyFile) | 
|  | } | 
|  |  | 
// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
// srv and returns any error from setting it up. If HTTP/2 is not
// configured for policy reasons, nil is returned.
|  | func (srv *Server) setupHTTP2_ServeTLS() error { | 
|  | srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) | 
|  | return srv.nextProtoErr | 
|  | } | 
|  |  | 
|  | // setupHTTP2_Serve is called from (*Server).Serve and conditionally | 
|  | // configures HTTP/2 on srv using a more conservative policy than | 
|  | // setupHTTP2_ServeTLS because Serve is called after tls.Listen, | 
|  | // and may be called concurrently. See shouldConfigureHTTP2ForServe. | 
|  | // | 
|  | // The tests named TestTransportAutomaticHTTP2* and | 
|  | // TestConcurrentServerServe in server_test.go demonstrate some | 
|  | // of the supported use cases and motivations. | 
|  | func (srv *Server) setupHTTP2_Serve() error { | 
|  | srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve) | 
|  | return srv.nextProtoErr | 
|  | } | 
|  |  | 
|  | func (srv *Server) onceSetNextProtoDefaults_Serve() { | 
|  | if srv.shouldConfigureHTTP2ForServe() { | 
|  | srv.onceSetNextProtoDefaults() | 
|  | } | 
|  | } | 
|  |  | 
// onceSetNextProtoDefaults configures HTTP/2 if the user hasn't
// configured otherwise (by setting srv.TLSNextProto non-nil).
|  | // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*). | 
|  | func (srv *Server) onceSetNextProtoDefaults() { | 
|  | if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { | 
|  | return | 
|  | } | 
|  | // Enable HTTP/2 by default if the user hasn't otherwise | 
|  | // configured their TLSNextProto map. | 
|  | if srv.TLSNextProto == nil { | 
|  | conf := &http2Server{ | 
|  | NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) }, | 
|  | } | 
|  | srv.nextProtoErr = http2ConfigureServer(srv, conf) | 
|  | } | 
|  | } | 
|  |  | 
|  | // TimeoutHandler returns a Handler that runs h with the given time limit. | 
|  | // | 
|  | // The new Handler calls h.ServeHTTP to handle each request, but if a | 
|  | // call runs for longer than its time limit, the handler responds with | 
|  | // a 503 Service Unavailable error and the given message in its body. | 
|  | // (If msg is empty, a suitable default message will be sent.) | 
|  | // After such a timeout, writes by h to its ResponseWriter will return | 
|  | // ErrHandlerTimeout. | 
|  | // | 
|  | // TimeoutHandler supports the Flusher and Pusher interfaces but does not | 
|  | // support the Hijacker interface. | 
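//
// A usage sketch (slowHandler is a placeholder):
//
//	h := http.TimeoutHandler(slowHandler, 2*time.Second, "request timed out")
//	http.Handle("/slow", h)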
|  | func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { | 
|  | return &timeoutHandler{ | 
|  | handler: h, | 
|  | body:    msg, | 
|  | dt:      dt, | 
|  | } | 
|  | } | 
|  |  | 
|  | // ErrHandlerTimeout is returned on ResponseWriter Write calls | 
|  | // in handlers which have timed out. | 
|  | var ErrHandlerTimeout = errors.New("http: Handler timeout") | 
|  |  | 
|  | type timeoutHandler struct { | 
|  | handler Handler | 
|  | body    string | 
|  | dt      time.Duration | 
|  |  | 
|  | // When set, no context will be created and this context will | 
|  | // be used instead. | 
|  | testContext context.Context | 
|  | } | 
|  |  | 
|  | func (h *timeoutHandler) errorBody() string { | 
|  | if h.body != "" { | 
|  | return h.body | 
|  | } | 
|  | return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" | 
|  | } | 
|  |  | 
|  | func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { | 
|  | ctx := h.testContext | 
|  | if ctx == nil { | 
|  | var cancelCtx context.CancelFunc | 
|  | ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt) | 
|  | defer cancelCtx() | 
|  | } | 
|  | r = r.WithContext(ctx) | 
|  | done := make(chan struct{}) | 
|  | tw := &timeoutWriter{ | 
|  | w: w, | 
|  | h: make(Header), | 
|  | } | 
|  | panicChan := make(chan interface{}, 1) | 
|  | go func() { | 
|  | defer func() { | 
|  | if p := recover(); p != nil { | 
|  | panicChan <- p | 
|  | } | 
|  | }() | 
|  | h.handler.ServeHTTP(tw, r) | 
|  | close(done) | 
|  | }() | 
|  | select { | 
|  | case p := <-panicChan: | 
|  | panic(p) | 
|  | case <-done: | 
|  | tw.mu.Lock() | 
|  | defer tw.mu.Unlock() | 
|  | dst := w.Header() | 
|  | for k, vv := range tw.h { | 
|  | dst[k] = vv | 
|  | } | 
|  | if !tw.wroteHeader { | 
|  | tw.code = StatusOK | 
|  | } | 
|  | w.WriteHeader(tw.code) | 
|  | w.Write(tw.wbuf.Bytes()) | 
|  | case <-ctx.Done(): | 
|  | tw.mu.Lock() | 
|  | defer tw.mu.Unlock() | 
|  | w.WriteHeader(StatusServiceUnavailable) | 
|  | io.WriteString(w, h.errorBody()) | 
|  | tw.timedOut = true | 
|  | } | 
|  | } | 
|  |  | 
|  | type timeoutWriter struct { | 
|  | w    ResponseWriter | 
|  | h    Header | 
|  | wbuf bytes.Buffer | 
|  |  | 
|  | mu          sync.Mutex | 
|  | timedOut    bool | 
|  | wroteHeader bool | 
|  | code        int | 
|  | } | 
|  |  | 
|  | var _ Pusher = (*timeoutWriter)(nil) | 
|  | var _ Flusher = (*timeoutWriter)(nil) | 
|  |  | 
|  | // Push implements the Pusher interface. | 
|  | func (tw *timeoutWriter) Push(target string, opts *PushOptions) error { | 
|  | if pusher, ok := tw.w.(Pusher); ok { | 
|  | return pusher.Push(target, opts) | 
|  | } | 
|  | return ErrNotSupported | 
|  | } | 
|  |  | 
|  | // Flush implements the Flusher interface. | 
|  | func (tw *timeoutWriter) Flush() { | 
|  | f, ok := tw.w.(Flusher) | 
|  | if ok { | 
|  | f.Flush() | 
|  | } | 
|  | } | 
|  |  | 
|  | func (tw *timeoutWriter) Header() Header { return tw.h } | 
|  |  | 
|  | func (tw *timeoutWriter) Write(p []byte) (int, error) { | 
|  | tw.mu.Lock() | 
|  | defer tw.mu.Unlock() | 
|  | if tw.timedOut { | 
|  | return 0, ErrHandlerTimeout | 
|  | } | 
|  | if !tw.wroteHeader { | 
|  | tw.writeHeader(StatusOK) | 
|  | } | 
|  | return tw.wbuf.Write(p) | 
|  | } | 
|  |  | 
|  | func (tw *timeoutWriter) WriteHeader(code int) { | 
|  | checkWriteHeaderCode(code) | 
|  | tw.mu.Lock() | 
|  | defer tw.mu.Unlock() | 
|  | if tw.timedOut || tw.wroteHeader { | 
|  | return | 
|  | } | 
|  | tw.writeHeader(code) | 
|  | } | 
|  |  | 
|  | func (tw *timeoutWriter) writeHeader(code int) { | 
|  | tw.wroteHeader = true | 
|  | tw.code = code | 
|  | } | 
|  |  | 
|  | // onceCloseListener wraps a net.Listener, protecting it from | 
|  | // multiple Close calls. | 
|  | type onceCloseListener struct { | 
|  | net.Listener | 
|  | once     sync.Once | 
|  | closeErr error | 
|  | } | 
|  |  | 
|  | func (oc *onceCloseListener) Close() error { | 
|  | oc.once.Do(oc.close) | 
|  | return oc.closeErr | 
|  | } | 
|  |  | 
|  | func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() } | 
|  |  | 
|  | // globalOptionsHandler responds to "OPTIONS *" requests. | 
|  | type globalOptionsHandler struct{} | 
|  |  | 
|  | func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { | 
|  | w.Header().Set("Content-Length", "0") | 
|  | if r.ContentLength != 0 { | 
// Read up to 4KB of OPTIONS body (the spec reserves OPTIONS
// request bodies for future use); anything over that is
// considered a waste of server resources (or an attack), so we
// abort and close the connection, courtesy of MaxBytesReader's
// EOF behavior.
|  | mb := MaxBytesReader(w, r.Body, 4<<10) | 
|  | io.Copy(ioutil.Discard, mb) | 
|  | } | 
|  | } | 
|  |  | 
|  | // initNPNRequest is an HTTP handler that initializes certain | 
|  | // uninitialized fields in its *Request. Such partially-initialized | 
|  | // Requests come from NPN protocol handlers. | 
|  | type initNPNRequest struct { | 
|  | ctx context.Context | 
|  | c   *tls.Conn | 
|  | h   serverHandler | 
|  | } | 
|  |  | 
|  | // BaseContext is an exported but unadvertised http.Handler method | 
|  | // recognized by x/net/http2 to pass down a context; the TLSNextProto | 
|  | // API predates context support so we shoehorn through the only | 
|  | // interface we have available. | 
|  | func (h initNPNRequest) BaseContext() context.Context { return h.ctx } | 
|  |  | 
|  | func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { | 
|  | if req.TLS == nil { | 
|  | req.TLS = &tls.ConnectionState{} | 
|  | *req.TLS = h.c.ConnectionState() | 
|  | } | 
|  | if req.Body == nil { | 
|  | req.Body = NoBody | 
|  | } | 
|  | if req.RemoteAddr == "" { | 
|  | req.RemoteAddr = h.c.RemoteAddr().String() | 
|  | } | 
|  | h.h.ServeHTTP(rw, req) | 
|  | } | 
|  |  | 
|  | // loggingConn is used for debugging. | 
|  | type loggingConn struct { | 
|  | name string | 
|  | net.Conn | 
|  | } | 
|  |  | 
|  | var ( | 
|  | uniqNameMu   sync.Mutex | 
|  | uniqNameNext = make(map[string]int) | 
|  | ) | 
|  |  | 
|  | func newLoggingConn(baseName string, c net.Conn) net.Conn { | 
|  | uniqNameMu.Lock() | 
|  | defer uniqNameMu.Unlock() | 
|  | uniqNameNext[baseName]++ | 
|  | return &loggingConn{ | 
|  | name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), | 
|  | Conn: c, | 
|  | } | 
|  | } | 
|  |  | 
|  | func (c *loggingConn) Write(p []byte) (n int, err error) { | 
|  | log.Printf("%s.Write(%d) = ....", c.name, len(p)) | 
|  | n, err = c.Conn.Write(p) | 
|  | log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) | 
|  | return | 
|  | } | 
|  |  | 
|  | func (c *loggingConn) Read(p []byte) (n int, err error) { | 
|  | log.Printf("%s.Read(%d) = ....", c.name, len(p)) | 
|  | n, err = c.Conn.Read(p) | 
|  | log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) | 
|  | return | 
|  | } | 
|  |  | 
|  | func (c *loggingConn) Close() (err error) { | 
|  | log.Printf("%s.Close() = ...", c.name) | 
|  | err = c.Conn.Close() | 
|  | log.Printf("%s.Close() = %v", c.name, err) | 
|  | return | 
|  | } | 
|  |  | 
|  | // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. | 
|  | // It only contains one field (and a pointer field at that), so it | 
|  | // fits in an interface value without an extra allocation. | 
|  | type checkConnErrorWriter struct { | 
|  | c *conn | 
|  | } | 
|  |  | 
|  | func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { | 
|  | n, err = w.c.rwc.Write(p) | 
|  | if err != nil && w.c.werr == nil { | 
|  | w.c.werr = err | 
|  | w.c.cancelCtx() | 
|  | } | 
|  | return | 
|  | } | 
|  |  | 
|  | func numLeadingCRorLF(v []byte) (n int) { | 
|  | for _, b := range v { | 
|  | if b == '\r' || b == '\n' { | 
|  | n++ | 
|  | continue | 
|  | } | 
|  | break | 
|  | } | 
return
}
|  |  | 
|  | func strSliceContains(ss []string, s string) bool { | 
|  | for _, v := range ss { | 
|  | if v == s { | 
|  | return true | 
|  | } | 
|  | } | 
|  | return false | 
|  | } | 
|  |  | 
|  | // tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header | 
|  | // looks like it might've been a misdirected plaintext HTTP request. | 
|  | func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool { | 
|  | switch string(hdr[:]) { | 
|  | case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO": | 
|  | return true | 
|  | } | 
|  | return false | 
|  | } |