Quick notes:
nginx as a reverse proxy in front of each framework; this is purely a framework benchmark, no application logic.
Machine spec:
Intel® Core™ i5-3470 CPU @ 3.20GHz, 4 cores, 8 GB RAM
test.go:
<!-- lang: go -->
package main

import (
	"encoding/json"
	"fmt"
	"github.com/emicklei/go-restful"
	"io"
	"net/http"
	"os"
	"runtime"
)

var count int = 0

func main() {
	runtime.GOMAXPROCS(8)

	// Register a single GET / route with go-restful.
	ws := new(restful.WebService)
	ws.Route(ws.GET("/").To(hello))
	restful.Add(ws)

	fmt.Println("Server starting on port ", os.Args[1])
	http.ListenAndServe(":"+os.Args[1], nil)
}

func hello(req *restful.Request, resp *restful.Response) {
	article := Article{"A Royal Baby", "A slow news week"}
	count++ // plain int increment: not goroutine-safe when requests run in parallel
	fmt.Println("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", count)
	b, _ := json.Marshal(article)
	io.WriteString(resp, string(b))
}

type Article struct {
	Name string
	Body string
}
tornado.py (note: a script named tornado.py can shadow the installed tornado package when run from its own directory; rename it or run it from elsewhere if the imports fail):
<!-- lang: python -->
#!/usr/bin/env python
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web

from tornado.options import define, options

define("port", default=8888, help="run on the given port", type=int)


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello, world")


def main():
    tornado.options.parse_command_line()
    application = tornado.web.Application([
        (r"/", MainHandler),
    ])
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
nginx config:
<!-- lang: shell -->
worker_processes  8;

events {
    worker_connections  1024;
}

http {
    upstream tornado_online {
        server 127.0.0.1:15101;
        server 127.0.0.1:15102;
        server 127.0.0.1:15103;
        server 127.0.0.1:15104;
    }

    upstream go_online {
        server 127.0.0.1:15201;
        server 127.0.0.1:15202;
        server 127.0.0.1:15203;
        server 127.0.0.1:15204;
    }

    server {
        listen       9000;
        server_name  localhost;

        #charset koi8-r;
        #access_log  logs/host.access.log  main;

        location / {
            proxy_pass http://tornado_online/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            client_max_body_size 10m;
            client_body_buffer_size 128k;
            proxy_connect_timeout 90;
            proxy_send_timeout 90;
            proxy_read_timeout 90;
        }
    }

    server {
        listen       9001;
        server_name  localhost;

        #charset koi8-r;
        #access_log  logs/host.access.log  main;

        location / {
            proxy_pass http://go_online/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            client_max_body_size 10m;
            client_body_buffer_size 128k;
            proxy_connect_timeout 90;
            proxy_send_timeout 90;
            proxy_read_timeout 90;
        }
    }
}
Start the Go servers:
<!-- lang: shell -->
nohup ./test 15201 &
nohup ./test 15202 &
nohup ./test 15203 &
nohup ./test 15204 &
Start the Tornado servers:
<!-- lang: shell -->
nohup python tornado.py --port=15101 &
nohup python tornado.py --port=15102 &
nohup python tornado.py --port=15103 &
nohup python tornado.py --port=15104 &
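Before firing ab, it can be worth a quick sanity check that both nginx front ends (9000 for Tornado, 9001 for Go) actually reach a live backend. A minimal sketch of such a check as a small Go helper (my own addition, not part of the original setup; addresses assume the config above):
<!-- lang: go -->
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// Hit each nginx front end once and print status plus body,
// so a dead upstream shows up before the benchmark starts.
func main() {
	for _, url := range []string{
		"http://127.0.0.1:9000/", // tornado_online
		"http://127.0.0.1:9001/", // go_online
	} {
		resp, err := http.Get(url)
		if err != nil {
			fmt.Println(url, "error:", err)
			continue
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(url, resp.Status, string(body))
	}
}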
Benchmark: Go
<!-- lang: shell -->
$ ab -c 500 -n 1000000 http://192.168.0.108:9001/
This is ApacheBench, Version 2.3 <$Revision: 655654 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 192.168.0.108 (be patient)
Completed 100000 requests
Completed 200000 requests
Completed 300000 requests
Completed 400000 requests
Completed 500000 requests
Completed 600000 requests
Completed 700000 requests
Completed 800000 requests
Completed 900000 requests
Completed 1000000 requests
Finished 1000000 requests
Server Software: nginx/1.5.7
Server Hostname: 192.168.0.108
Server Port: 9001
Document Path: /
Document Length: 49 bytes
Concurrency Level: 500
Time taken for tests: 89.459 seconds
Complete requests: 1000000
Failed requests: 0
Write errors: 0
Total transferred: 206002060 bytes
HTML transferred: 49000490 bytes
Requests per second: 11178.28 [#/sec] (mean)
Time per request: 44.730 [ms] (mean)
Time per request: 0.089 [ms] (mean, across all concurrent requests)
Transfer rate: 2248.78 [Kbytes/sec] received
Connection Times (ms)
              min  mean[+/-sd] median   max
Connect:        0   24  123.4      7    2028
Processing:     1   20   19.1     18     630
Waiting:        1   16   18.2     14     623
Total:          2   45  125.9     26    2040
Percentage of the requests served within a certain time (ms)
50% 26
66% 31
75% 34
80% 36
90% 43
95% 51
98% 112
99% 983
100% 2040 (longest request)
➜ test ps aux | grep test
root 25733 11.6 0.3 1739840 28812 pts/7 SNl 15:04 0:25 ./test 15201
root 25737 11.6 0.4 2140036 36104 pts/7 SNl 15:04 0:25 ./test 15202
root 25741 11.6 0.3 1697800 28472 pts/7 SNl 15:04 0:25 ./test 15203
root 25754 11.6 0.3 1738620 28892 pts/7 SNl 15:04 0:25 ./test 15204
CPU: around 50%
Benchmark: Tornado
<!-- lang: shell -->
$ ab -c 500 -n 1000000 http://192.168.0.108:9000/
This is ApacheBench, Version 2.3 <$Revision: 655654 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 192.168.0.108 (be patient)
Completed 100000 requests
Completed 200000 requests
Completed 300000 requests
Completed 400000 requests
Completed 500000 requests
Completed 600000 requests
Completed 700000 requests
Completed 800000 requests
Completed 900000 requests
Completed 1000000 requests
Finished 1000000 requests
Server Software: nginx/1.5.7
Server Hostname: 192.168.0.108
Server Port: 9000
Document Path: /
Document Length: 12 bytes
Concurrency Level: 500
Time taken for tests: 163.648 seconds
Complete requests: 1000000
Failed requests: 1
(Connect: 0, Receive: 0, Length: 1, Exceptions: 0)
Write errors: 0
Non-2xx responses: 1
Total transferred: 218000099 bytes
HTML transferred: 12000156 bytes
Requests per second: 6110.67 [#/sec] (mean)
Time per request: 81.824 [ms] (mean)
Time per request: 0.164 [ms] (mean, across all concurrent requests)
Transfer rate: 1300.90 [Kbytes/sec] received
Connection Times (ms)
              min  mean[+/-sd] median   max
Connect:        0    6   33.9      1    1011
Processing:     1   75  188.5     40    7108
Waiting:        1   72  188.2     37    7107
Total:          1   81  191.2     45    7110
Percentage of the requests served within a certain time (ms)
50% 45
66% 66
75% 82
80% 92
90% 121
95% 150
98% 1027
99% 1067
100% 7110 (longest request)
➜ helloworld git:(master) ✗ ps aux | grep tornado
root 1747 0.0 0.1 106348 12920 pts/7 SN 15:34 0:00 python tornado.py –port=15101
root 4534 0.0 0.0 15956 1092 pts/7 S+ 15:39 0:00 grep tornado
root 16785 5.0 0.2 112056 18472 pts/7 SN 14:24 3:46 python tornado.py --port=15101
root 16847 5.0 0.2 111260 18036 pts/7 SN 14:24 3:46 python tornado.py --port=15102
root 16879 5.0 0.2 111832 18396 pts/7 SN 14:24 3:47 python tornado.py --port=15103
root 16911 5.0 0.2 111272 18044 pts/7 SN 14:24 3:47 python tornado.py --port=15104
CPU: 100%
Summary:
Go+nginx handles roughly 1.8x the QPS of Tornado+nginx (11178 vs 6111 requests/sec), and Tornado's mean time per request is likewise about 1.8x Go's (81.8 ms vs 44.7 ms). Tornado also saturates the CPU at 100%, while Go only needs around 50%.