Go's standard library ships with an http package, so writing an HTTP server in Go is straightforward. While building HTTP services I realized I didn't really understand the whole request-handling flow, so this article walks through it.
package main

import (
    "fmt"
    "net/http"
)

func hello(w http.ResponseWriter, req *http.Request) {
    fmt.Fprintf(w, "hello\n")
}

func headers(w http.ResponseWriter, req *http.Request) {
    for name, headers := range req.Header {
        for _, h := range headers {
            fmt.Fprintf(w, "%v: %v\n", name, h)
        }
    }
}

func main() {
    http.HandleFunc("/hello", hello)
    http.HandleFunc("/headers", headers)
    http.ListenAndServe(":7777", nil)
}
This is the simple HTTP server example from the official site. Structurally the logic is quite clear: define a route and a handler function for it. Many Go web frameworks are essentially wrappers built on top of this standard library. If you visit http://127.0.0.1:7777/hello you will see the output hello.
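For completeness, here is a minimal sketch of a client hitting the /hello endpoint with the standard library (the address :7777 matches the example above):

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func main() {
    resp, err := http.Get("http://127.0.0.1:7777/hello")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Printf("status=%s body=%s", resp.Status, body) // expect: status=200 OK body=hello
}

With that in hand, let's start from http.ListenAndServe and follow a request all the way down.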
func ListenAndServe(addr string, handler Handler) error {
    server := &Server{Addr: addr, Handler: handler}
    return server.ListenAndServe()
}
The code is straightforward: it initializes a Server with the given address and handler, then calls the Server's ListenAndServe method, which starts processing requests.
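In other words, the package-level helper is just a convenience around constructing the Server yourself; a minimal sketch of the equivalent:

package main

import (
    "log"
    "net/http"
)

func main() {
    srv := &http.Server{
        Addr:    ":7777",
        Handler: nil, // nil: fall back to DefaultServeMux, as we will see below
    }
    log.Fatal(srv.ListenAndServe()) // same effect as http.ListenAndServe(":7777", nil)
}

Here is the Server.ListenAndServe method itself: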
func (srv *Server) ListenAndServe() error {
    if srv.shuttingDown() { // bail out if the server is already shutting down
        return ErrServerClosed
    }
    addr := srv.Addr
    if addr == "" {
        addr = ":http"
    }
    ln, err := net.Listen("tcp", addr) // listen on the given address/port over TCP
    if err != nil {
        return err
    }
    return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) // enter the accept loop
}
The listener it hands to Serve is wrapped in the tcpKeepAliveListener type:
type tcpKeepAliveListener struct {
    *net.TCPListener
}

func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
    tc, err := ln.AcceptTCP() // accept the incoming TCP connection
    if err != nil {
        return nil, err
    }
    tc.SetKeepAlive(true)                  // enable TCP keep-alive
    tc.SetKeepAlivePeriod(3 * time.Minute) // set the keep-alive period
    return tc, nil
}
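If you need control over the listener yourself, you can call Serve with your own net.Listener; note that in the standard library version shown here only ListenAndServe applies the tcpKeepAliveListener wrapping, so any keep-alive tuning is then up to you. A minimal sketch:

package main

import (
    "log"
    "net"
    "net/http"
)

func main() {
    ln, err := net.Listen("tcp", "127.0.0.1:0") // port 0: let the OS pick a free port
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("listening on %s", ln.Addr())

    srv := &http.Server{Handler: http.DefaultServeMux}
    log.Fatal(srv.Serve(ln)) // same accept loop as ListenAndServe, but with our own listener
}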
Serve is where the main accept loop runs:
func (srv *Server) Serve(l net.Listener) error {
    if fn := testHookServerServe; fn != nil {
        fn(srv, l) // call hook with unwrapped listener
    }
    l = &onceCloseListener{Listener: l} // make sure the listener is closed at most once
    defer l.Close()
    if err := srv.setupHTTP2_Serve(); err != nil {
        return err
    }
    if !srv.trackListener(&l, true) {
        return ErrServerClosed
    }
    defer srv.trackListener(&l, false)

    var tempDelay time.Duration     // how long to sleep on accept failure
    baseCtx := context.Background() // base is always background, per Issue 16220
    ctx := context.WithValue(baseCtx, ServerContextKey, srv)
    for {
        rw, e := l.Accept() // accept the next connection
        if e != nil {
            select {
            case <-srv.getDoneChan(): // if the server has been closed, stop accepting
                return ErrServerClosed
            default:
            }
            if ne, ok := e.(net.Error); ok && ne.Temporary() { // on a temporary error, back off and retry
                if tempDelay == 0 {
                    tempDelay = 5 * time.Millisecond
                } else {
                    tempDelay *= 2
                }
                if max := 1 * time.Second; tempDelay > max {
                    tempDelay = max
                }
                srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
                time.Sleep(tempDelay)
                continue
            }
            return e
        }
        tempDelay = 0
        c := srv.newConn(rw)        // wrap the net.Conn in an http conn
        c.setState(c.rwc, StateNew) // before Serve can return
        go c.serve(ctx)             // handle the connection in its own goroutine
    }
}
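Note that the srv value stored in the context here (and the local address stored below in conn.serve) can be read back inside a handler through the request's context; a small sketch:

package main

import (
    "fmt"
    "log"
    "net"
    "net/http"
)

func main() {
    http.HandleFunc("/ctx", func(w http.ResponseWriter, r *http.Request) {
        // Values placed into the context by Server.Serve and conn.serve.
        if srv, ok := r.Context().Value(http.ServerContextKey).(*http.Server); ok {
            fmt.Fprintf(w, "server Addr field: %q\n", srv.Addr)
        }
        if addr, ok := r.Context().Value(http.LocalAddrContextKey).(net.Addr); ok {
            fmt.Fprintf(w, "local address: %v\n", addr)
        }
    })
    log.Fatal(http.ListenAndServe(":7777", nil))
}

Each accepted connection is then handled by conn.serve: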
// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
    c.remoteAddr = c.rwc.RemoteAddr().String() // record the remote address
    ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
    defer func() {
        if err := recover(); err != nil && err != ErrAbortHandler {
            const size = 64 << 10
            buf := make([]byte, size)
            buf = buf[:runtime.Stack(buf, false)]
            c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
        }
        if !c.hijacked() {
            c.close()
            c.setState(c.rwc, StateClosed)
        }
    }() // recover from handler panics and close the connection
    if tlsConn, ok := c.rwc.(*tls.Conn); ok { // is this a TLS (https) connection?
        if d := c.server.ReadTimeout; d != 0 {
            c.rwc.SetReadDeadline(time.Now().Add(d))
        }
        if d := c.server.WriteTimeout; d != 0 {
            c.rwc.SetWriteDeadline(time.Now().Add(d))
        }
        if err := tlsConn.Handshake(); err != nil {
            // If the handshake failed due to the client not speaking
            // TLS, assume they're speaking plaintext HTTP and write a
            // 400 response on the TLS conn's underlying net.Conn.
            if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
                io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
                re.Conn.Close()
                return
            }
            c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
            return
        }
        c.tlsState = new(tls.ConnectionState)
        *c.tlsState = tlsConn.ConnectionState()
        if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
            if fn := c.server.TLSNextProto[proto]; fn != nil {
                h := initNPNRequest{tlsConn, serverHandler{c.server}}
                fn(c.server, tlsConn, h)
            }
            return
        }
    }

    // HTTP/1.x from here on.

    ctx, cancelCtx := context.WithCancel(ctx)
    c.cancelCtx = cancelCtx
    defer cancelCtx()

    c.r = &connReader{conn: c}                                  // reader tied to this connection
    c.bufr = newBufioReader(c.r)                                // buffered reader on top of it
    c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) // buffered writer whose underlying writer is the connection c

    for {
        w, err := c.readRequest(ctx) // read and parse the next request, returning a response object
        if c.r.remain != c.server.initialReadLimitSize() {
            // If we read any bytes off the wire, we're active.
            c.setState(c.rwc, StateActive)
        }
        if err != nil {
            const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"

            if err == errTooLarge {
                // Their HTTP client may or may not be
                // able to read this if we're
                // responding to them and hanging up
                // while they're still writing their
                // request. Undefined behavior.
                const publicErr = "431 Request Header Fields Too Large"
                fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
                c.closeWriteAndWait()
                return
            }
            if isCommonNetReadError(err) {
                return // don't reply
            }

            publicErr := "400 Bad Request"
            if v, ok := err.(badRequestError); ok {
                publicErr = publicErr + ": " + string(v)
            }

            fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
            return
        }

        // Expect 100 Continue support
        req := w.req // the parsed request
        if req.expectsContinue() {
            if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
                // Wrap the Body reader with one that replies on the connection
                req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
            }
        } else if req.Header.get("Expect") != "" {
            w.sendExpectationFailed()
            return
        }

        c.curReq.Store(w)

        if requestBodyRemains(req.Body) { // body not fully read yet
            registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
        } else {
            w.conn.r.startBackgroundRead() // otherwise start a background read from the client
        }

        // HTTP cannot have multiple simultaneous active requests.[*]
        // Until the server replies to this request, it can't read another,
        // so we might as well run the handler in this goroutine.
        // [*] Not strictly true: HTTP pipelining. We could let them all process
        // in parallel even if their responses need to be serialized.
        // But we're not going to implement HTTP pipelining because it
        // was never deployed in the wild and the answer is HTTP/2.
        serverHandler{c.server}.ServeHTTP(w, w.req) // dispatch to the handler configured on the Server
        w.cancelCtx()
        if c.hijacked() {
            return
        }
        w.finishRequest()               // flush the buffered response data back to the client
        if !w.shouldReuseConnection() { // should the connection be closed after this request?
            if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
                c.closeWriteAndWait()
            }
            return
        }
        c.setState(c.rwc, StateIdle) // mark the connection idle
        c.curReq.Store((*response)(nil))

        if !w.conn.server.doKeepAlives() {
            // We're in shutdown mode. We might've replied
            // to the user without "Connection: close" and
            // they might think they can send another
            // request, but such is life with HTTP/1.1.
            return
        }

        if d := c.server.idleTimeout(); d != 0 {
            c.rwc.SetReadDeadline(time.Now().Add(d))
            if _, err := c.bufr.Peek(4); err != nil {
                return
            }
        }
        c.rwc.SetReadDeadline(time.Time{})
    }
}
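One practical consequence of the deferred recover above is that a panic inside a handler kills only that request's connection, not the whole server. A quick sketch to observe this behavior:

package main

import (
    "log"
    "net/http"
)

func main() {
    http.HandleFunc("/panic", func(w http.ResponseWriter, r *http.Request) {
        panic("boom") // recovered in conn.serve; logged as "http: panic serving ..."
    })
    http.HandleFunc("/ok", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("still alive\n")) // the server keeps serving other requests
    })
    log.Fatal(http.ListenAndServe(":7777", nil))
}

Back in the loop, each iteration begins with conn.readRequest: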
// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
    if c.hijacked() {
        return nil, ErrHijacked
    }

    var (
        wholeReqDeadline time.Time // or zero if none
        hdrDeadline      time.Time // or zero if none
    )
    t0 := time.Now()                               // capture the current time
    if d := c.server.readHeaderTimeout(); d != 0 { // was a header-read timeout configured?
        hdrDeadline = t0.Add(d)
    }
    if d := c.server.ReadTimeout; d != 0 { // was a read timeout configured?
        wholeReqDeadline = t0.Add(d)
    }
    c.rwc.SetReadDeadline(hdrDeadline)
    if d := c.server.WriteTimeout; d != 0 { // was a write timeout configured?
        defer func() {
            c.rwc.SetWriteDeadline(time.Now().Add(d))
        }()
    }

    c.r.setReadLimit(c.server.initialReadLimitSize()) // cap how many bytes the headers may take
    if c.lastMethod == "POST" {
        // RFC 7230 section 3 tolerance for old buggy clients.
        peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
        c.bufr.Discard(numLeadingCRorLF(peek))
    }
    req, err := readRequest(c.bufr, keepHostHeader) // read and parse the request line and headers
    if err != nil {
        if c.r.hitReadLimit() {
            return nil, errTooLarge
        }
        return nil, err
    }

    if !http1ServerSupportsRequest(req) { // reject unsupported protocol versions
        return nil, badRequestError("unsupported protocol version")
    }

    c.lastMethod = req.Method
    c.r.setInfiniteReadLimit()

    hosts, haveHost := req.Header["Host"] // extract the Host header
    isH2Upgrade := req.isH2Upgrade()
    if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
        return nil, badRequestError("missing required Host header")
    }
    if len(hosts) > 1 {
        return nil, badRequestError("too many Host headers")
    }
    if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
        return nil, badRequestError("malformed Host header")
    }
    for k, vv := range req.Header {
        if !httpguts.ValidHeaderFieldName(k) {
            return nil, badRequestError("invalid header name")
        }
        for _, v := range vv {
            if !httpguts.ValidHeaderFieldValue(v) {
                return nil, badRequestError("invalid header value")
            }
        }
    }
    delete(req.Header, "Host")

    ctx, cancelCtx := context.WithCancel(ctx)
    req.ctx = ctx
    req.RemoteAddr = c.remoteAddr
    req.TLS = c.tlsState
    if body, ok := req.Body.(*body); ok { // allow the request body to be closed early
        body.doEarlyClose = true
    }

    // Adjust the read deadline if necessary.
    if !hdrDeadline.Equal(wholeReqDeadline) {
        c.rwc.SetReadDeadline(wholeReqDeadline)
    }

    w = &response{
        conn:          c,
        cancelCtx:     cancelCtx,
        req:           req,
        reqBody:       req.Body,
        handlerHeader: make(Header),
        contentLength: -1,
        closeNotifyCh: make(chan bool, 1),

        // We populate these ahead of time so we're not
        // reading from req.Header after their Handler starts
        // and maybe mutates it (Issue 14940)
        wants10KeepAlive: req.wantsHttp10KeepAlive(),
        wantsClose:       req.wantsClose(),
    } // build the response, carrying the connection and parsed request along
    if isH2Upgrade {
        w.closeAfterReply = true
    }
    w.cw.res = w
    w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) // buffered writer for the handler's output
    return w, nil
}
Looking at the flow of readRequest: it first applies any configured timeouts, then reads and parses the HTTP request line and headers via the readRequest helper, and finally builds a response object. That response is handed to the handler, concretely to serverHandler's ServeHTTP, which ultimately calls the functions we registered in the example code.
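Since readRequest derives its deadlines from fields on the Server, those timeouts are something you configure when building the Server. A minimal sketch (the durations are arbitrary example values):

package main

import (
    "log"
    "net/http"
    "time"
)

func main() {
    srv := &http.Server{
        Addr:              ":7777",
        ReadHeaderTimeout: 2 * time.Second,  // drives hdrDeadline in readRequest
        ReadTimeout:       5 * time.Second,  // drives wholeReqDeadline in readRequest
        WriteTimeout:      10 * time.Second, // write deadline applied before the response is written
        IdleTimeout:       60 * time.Second, // how long a keep-alive connection may sit idle
    }
    log.Fatal(srv.ListenAndServe())
}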
Now look at the dispatch line serverHandler{c.server}.ServeHTTP(w, w.req):
type serverHandler struct {
    srv *Server
}

func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
    handler := sh.srv.Handler
    if handler == nil {
        handler = DefaultServeMux // fall back to the default mux; that's what our example uses
    }
    if req.RequestURI == "*" && req.Method == "OPTIONS" {
        handler = globalOptionsHandler{}
    }
    handler.ServeHTTP(rw, req) // dispatch to that handler
}
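The fallback to DefaultServeMux only happens because the example passed nil to ListenAndServe; if you pass your own handler, that is what serverHandler dispatches to. A small sketch using a separate ServeMux:

package main

import (
    "log"
    "net/http"
)

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("hello from my own mux\n"))
    })

    // Handler is non-nil, so DefaultServeMux is never consulted.
    log.Fatal(http.ListenAndServe(":7777", mux))
}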
In our example that handler is DefaultServeMux, so its ServeHTTP runs next:
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
    if r.RequestURI == "*" {
        if r.ProtoAtLeast(1, 1) {
            w.Header().Set("Connection", "close")
        }
        w.WriteHeader(StatusBadRequest)
        return
    }
    h, _ := mux.Handler(r) // look up the matching handler
    h.ServeHTTP(w, r)      // and invoke it
}
...
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
    // CONNECT requests are not canonicalized.
    if r.Method == "CONNECT" {
        // If r.URL.Path is /tree and its handler is not registered,
        // the /tree -> /tree/ redirect applies to CONNECT requests
        // but the path canonicalization does not.
        if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
            return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
        }
        return mux.handler(r.Host, r.URL.Path)
    }

    // All other requests have any port stripped and path cleaned
    // before passing to mux.handler.
    host := stripHostPort(r.Host) // host without the port
    path := cleanPath(r.URL.Path) // canonicalized path

    // If the given path is /tree and its handler is not registered,
    // redirect for /tree/.
    if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
        return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
    }

    if path != r.URL.Path {
        _, pattern = mux.handler(host, path) // match using the cleaned host and path
        url := *r.URL
        url.Path = path
        return RedirectHandler(url.String(), StatusMovedPermanently), pattern
    }

    return mux.handler(host, r.URL.Path) // look up the handler for this host and path
}
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
    mux.mu.RLock()
    defer mux.mu.RUnlock()

    // Host-specific pattern takes precedence over generic ones
    if mux.hosts {
        h, pattern = mux.match(host + path) // try host+path first
    }
    if h == nil {
        h, pattern = mux.match(path) // then fall back to path only
    }
    if h == nil {
        h, pattern = NotFoundHandler(), "" // nothing matched: 404
    }
    return
}
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
    // Check for exact match first.
    v, ok := mux.m[path] // is this exact path registered?
    if ok {
        return v.h, v.pattern // if so, return its handler and pattern
    }

    // Check for longest valid match. mux.es contains all patterns
    // that end in / sorted from longest to shortest.
    for _, e := range mux.es {
        if strings.HasPrefix(path, e.pattern) { // prefix match against the subtree patterns
            return e.h, e.pattern
        }
    }
    return nil, ""
}
So ServeHTTP first tries an exact match on the full path and, failing that, the longest registered subtree pattern (one ending in /); the matched entry is the handler we wrote, and it is then invoked to handle the request.
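The exact-match versus subtree-match rule is easy to see with two registrations; a minimal sketch:

package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    mux := http.NewServeMux()
    // Exact pattern: matches only /images
    mux.HandleFunc("/images", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "exact match: /images")
    })
    // Subtree pattern (trailing slash): matches /images/, /images/a.png, ...
    mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "subtree match for", r.URL.Path)
    })
    log.Fatal(http.ListenAndServe(":7777", mux))
}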
Handler registration works as follows; in this article's example, http.HandleFunc is what registers a handler for a URL pattern:
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
    DefaultServeMux.HandleFunc(pattern, handler)
}
...
// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
    if handler == nil {
        panic("http: nil handler")
    }
    mux.Handle(pattern, HandlerFunc(handler))
}
...
func (mux *ServeMux) Handle(pattern string, handler Handler) {
    mux.mu.Lock()
    defer mux.mu.Unlock()

    if pattern == "" {
        panic("http: invalid pattern")
    }
    if handler == nil { // the handler must not be nil
        panic("http: nil handler")
    }
    if _, exist := mux.m[pattern]; exist { // each pattern may be registered only once
        panic("http: multiple registrations for " + pattern)
    }

    if mux.m == nil {
        mux.m = make(map[string]muxEntry)
    }
    e := muxEntry{h: handler, pattern: pattern} // store the handler together with its pattern
    mux.m[pattern] = e
    if pattern[len(pattern)-1] == '/' {
        mux.es = appendSorted(mux.es, e)
    }
    if pattern[0] != '/' {
        mux.hosts = true
    }
}
...
At this point the matching is done and the chosen handler has been invoked to run the actual business logic.
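HandleFunc is just the convenient path; anything implementing the Handler interface (a ServeHTTP method) can be registered with http.Handle and ends up in the same mux.m map. A minimal sketch using a hypothetical counter type:

package main

import (
    "fmt"
    "log"
    "net/http"
    "sync/atomic"
)

// counter is a hypothetical example type implementing http.Handler.
type counter struct {
    n int64
}

func (c *counter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    fmt.Fprintf(w, "hit #%d\n", atomic.AddInt64(&c.n, 1))
}

func main() {
    http.Handle("/count", &counter{}) // same registration path as HandleFunc, via ServeMux.Handle
    log.Fatal(http.ListenAndServe(":7777", nil))
}

Once the handler returns, conn.serve calls response.finishRequest to push the buffered response back to the client: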
func (w *response) finishRequest() {
    w.handlerDone.setTrue() // mark the handler as done

    if !w.wroteHeader { // if the handler never wrote a header, send 200 OK
        w.WriteHeader(StatusOK)
    }

    w.w.Flush()         // flush the handler's buffered output
    putBufioWriter(w.w) // return that bufio.Writer to the pool
    w.cw.close()        // finish writing the response body
    w.conn.bufw.Flush() // flush the connection's write buffer out to the client
    w.conn.r.abortPendingRead()

    // Close the body (regardless of w.closeAfterReply) so we can
    // re-use its bufio.Reader later safely.
    w.reqBody.Close()

    if w.req.MultipartForm != nil {
        w.req.MultipartForm.RemoveAll()
    }
}
w.conn.bufw is exactly the c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) created earlier; because the connection c is registered as its underlying writer, flushing this buffer is what actually sends the response data to the client.
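That layering also explains why a handler's writes normally reach the client only after the handler returns (or the buffers fill up). If you need to push partial output earlier, the ResponseWriter usually also implements http.Flusher; a minimal sketch:

package main

import (
    "fmt"
    "log"
    "net/http"
    "time"
)

func main() {
    http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
        flusher, ok := w.(http.Flusher)
        if !ok {
            http.Error(w, "streaming unsupported", http.StatusInternalServerError)
            return
        }
        for i := 0; i < 3; i++ {
            fmt.Fprintf(w, "chunk %d\n", i)
            flusher.Flush() // force the buffered data down through bufw to the client now
            time.Sleep(time.Second)
        }
    })
    log.Fatal(http.ListenAndServe(":7777", nil))
}

Under the hood, bufio.Writer's Flush is what pushes the buffered bytes into that underlying writer: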
func (b *Writer) Flush() error {
    if b.err != nil {
        return b.err
    }
    if b.n == 0 {
        return nil
    }
    n, err := b.wr.Write(b.buf[0:b.n]) // write out the buffered data; b.wr is the connection wrapper passed in at construction
    if n < b.n && err == nil {
        err = io.ErrShortWrite
    }
    if err != nil {
        if n > 0 && n < b.n {
            copy(b.buf[0:b.n-n], b.buf[n:b.n])
        }
        b.n -= n
        b.err = err
        return err
    }
    b.n = 0
    return nil
}
With that, the processing of one request is essentially complete.
This article has only sketched the basic request-handling flow of the HTTP server in Go's standard library. Once this flow is clear, the way the various Go web frameworks process requests becomes much easier to follow, which helps with using them flexibly in development. My knowledge is limited, so corrections are welcome if anything here is wrong.