http://blog.csdn.net/zwleagle/article/details/8851400
http://blog.sina.com.cn/s/blog_a574f78401015v2o.html
http://www.dssz.com/1341360.html
http://www.dssz.com/1341355.html
Multi-threaded network processing server demo
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>   /* intptr_t, for passing the worker index through a void* argument */
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>
#include <arpa/inet.h>

#include <fcntl.h>
#include <sys/epoll.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <pthread.h>
#include <assert.h>

#ifdef DEBUG_TILERA
#include <tmc/alloc.h>

#include <arch/cycle.h>
#include <arch/spr.h>

#include <tmc/cpus.h>
#include <tmc/sync.h>
#include <tmc/task.h>
#endif

#define MAXBUF 1024
#define MAXEPOLLSIZE 500000

#define MAX_THREAD_NUMBER 200
int THREAD_NUMBER = 50;

/* epoll instance used by the main thread for the listening socket */
int kdpfd;
struct epoll_event events[MAXEPOLLSIZE];

/* one epoll instance and one event array per worker thread */
struct epoll_event thread_events[MAX_THREAD_NUMBER][MAXEPOLLSIZE];
int fdpool[MAX_THREAD_NUMBER] = {-1};

pthread_t handle_receive_thrdid[MAX_THREAD_NUMBER];

int msgcount = 0;
int timecount = 0;

int count_packet = 0;

pthread_mutex_t connet_count_lock = PTHREAD_MUTEX_INITIALIZER;
int connect_count = 0;

pthread_mutex_t curfds_lock;
int curfds;

/* one receive buffer per worker thread */
char buffer[MAX_THREAD_NUMBER][MAXBUF + 1];
pthread_t thread_count;

#ifdef DEBUG_TILERA
cpu_set_t cpus;
#endif

void BubbleSort(unsigned char R[], int n)
{
    int i, j;
    unsigned char temp;
    for (i = 0; i < n - 1; i++)
    {
        for (j = n - 2; j >= i; j--)
        {
            if (R[j] > R[j + 1])
            {
                temp = R[j];
                R[j] = R[j + 1];
                R[j + 1] = temp;
            }
        }
    }
}

/* Put a socket into non-blocking mode. */
int setnonblocking(int sockfd)
{
    /* F_GETFL (not F_GETFD) retrieves the file status flags that O_NONBLOCK belongs to. */
    if (fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL, 0) | O_NONBLOCK) == -1)
    {
        return -1;
    }
    return 0;
}

/* Statistics thread: every 5 seconds report the receive rate and totals. */
static void *handle_count(void *arg)
{
    int precount, speed;

    while (1)
    {
        precount = msgcount;
        sleep(5);
        timecount += 5;

        speed = msgcount - precount;
        printf("Receive rate: %d msgs / 5 seconds, connections %d, total packets %d\n",
               speed, connect_count, msgcount);
    }

    return NULL;
}

/* Worker thread: wait on this thread's own epoll instance and drain readable sockets. */
static void *handl_receive_msg(void *arg)
{
    int fdind = 0;
    int nfds = 0;
    int len;
    struct epoll_event ev;
    int fdi;

    char *buf;

    fdind = (int)(intptr_t)arg;    /* worker index passed through the void* argument */

    buf = (char *)&buffer[fdind];

    while (1)
    {
        nfds = epoll_wait(fdpool[fdind], &thread_events[fdind][0], MAXEPOLLSIZE, -1);
        if (nfds == -1)
        {
            perror("epoll_wait");
            break;
        }

        for (fdi = 0; fdi < nfds; fdi++)
        {
            if (thread_events[fdind][fdi].events & EPOLLIN)
            {
                /* Edge-triggered socket: keep reading until recv() fails with EAGAIN. */
                while ((-1 != (len = recv(thread_events[fdind][fdi].data.fd, buf, MAXBUF, 0)))
                       || ((-1 == len) && (EAGAIN != errno)))
                {
                    if (len > 0)
                    {
                        msgcount++;
                        BubbleSort((unsigned char *)buf, len);
                    }
                    else if (len == 0)
                    {
                        /* Peer closed the connection: remove it from epoll and close it. */
                        epoll_ctl(fdpool[fdind], EPOLL_CTL_DEL, thread_events[fdind][fdi].data.fd, &ev);
                        close(thread_events[fdind][fdi].data.fd);
                        pthread_mutex_lock(&connet_count_lock);
                        connect_count--;
                        pthread_mutex_unlock(&connet_count_lock);

                        break;
                    }
                    else
                    {
                        printf("socket %d receive message fail, error code: %d, error message: '%s'\n",
                               thread_events[fdind][fdi].data.fd, errno, strerror(errno));

                        epoll_ctl(fdpool[fdind], EPOLL_CTL_DEL, thread_events[fdind][fdi].data.fd, &ev);
                        close(thread_events[fdind][fdi].data.fd);
                        pthread_mutex_lock(&connet_count_lock);
                        connect_count--;
                        pthread_mutex_unlock(&connet_count_lock);

                        break;
                    }
                }
            }
            else
            {
                printf("other event %u\n", thread_events[fdind][fdi].events);
            }
        }
    }

    return NULL;
}

int fd_index = 0;

int main(int argc, char **argv)
{
    int listener, new_fd, nfds, n;
    socklen_t len;
    struct sockaddr_in my_addr, their_addr;
    unsigned int myport, lisnum;
    struct epoll_event ev;
    struct rlimit rt;
    int fdind;
    int ind;

    if (5 != argc)
    {
        printf("Usage: %s <thread_number(0 ~ 200)> <port(0-65535)> <listen queue number> <IP Address>\n", argv[0]);
        exit(1);
    }

    if (argv[1])
        THREAD_NUMBER = atoi(argv[1]);
    if (THREAD_NUMBER <= 0 || THREAD_NUMBER > MAX_THREAD_NUMBER)
    {
        printf("thread number must be between 1 and %d\n", MAX_THREAD_NUMBER);
        exit(1);
    }

    if (argv[2])
        myport = atoi(argv[2]);
    else
        myport = 7838;

    if (argv[3])
        lisnum = atoi(argv[3]);
    else
        lisnum = 2;

#ifdef DEBUG_TILERA
    if (tmc_cpus_get_my_affinity(&cpus) != 0)
    {
        printf("tmc_cpus_get_my_affinity() failed.\n");
        tmc_task_die("tmc_cpus_get_my_affinity() failed.");
    }
    if (tmc_cpus_count(&cpus) < THREAD_NUMBER)
    {
        printf("\nInsufficient cpus available.\n");
        tmc_task_die("Insufficient cpus available.");
    }
#endif

    pthread_mutex_init(&connet_count_lock, NULL);
    pthread_mutex_init(&curfds_lock, NULL);

    /* One epoll instance per worker thread; accepted sockets are spread across them. */
    for (ind = 0; ind < THREAD_NUMBER; ind++)
    {
        fdpool[ind] = epoll_create(MAXEPOLLSIZE);
    }

    for (ind = 0; ind < THREAD_NUMBER; ind++)
    {
        pthread_create(&handle_receive_thrdid[ind], NULL, &handl_receive_msg, (void *)(intptr_t)ind);
    }

    if (pthread_create(&thread_count, NULL, &handle_count, NULL) != 0)
    {
#ifdef DEBUG_TILERA
        tmc_task_die("pthread_create() failed.");
#endif
    }

    /* Raise the open-file limit so a large number of connections can be handled. */
    rt.rlim_max = rt.rlim_cur = MAXEPOLLSIZE;
    if (setrlimit(RLIMIT_NOFILE, &rt) == -1)
    {
        perror("setrlimit");
        exit(1);
    }
    else
        printf("set the system resource success!\n");

    if ((listener = socket(PF_INET, SOCK_STREAM, 0)) == -1)
    {
        perror("socket");
        exit(1);
    }
    else
        printf("socket create success!\n");

    setnonblocking(listener);

    bzero(&my_addr, sizeof(my_addr));
    my_addr.sin_family = AF_INET;
    my_addr.sin_port = htons(myport);
    if (argv[4])
        my_addr.sin_addr.s_addr = inet_addr(argv[4]);
    else
        my_addr.sin_addr.s_addr = INADDR_ANY;

    if (bind(listener, (struct sockaddr *)&my_addr, sizeof(struct sockaddr)) == -1)
    {
        perror("bind");
        exit(1);
    }
    else
        printf("IP address and port bind success!\n");

    if (listen(listener, lisnum) == -1)
    {
        perror("listen");
        exit(1);
    }
    else
        printf("start to work!\n");

    /* The main thread's epoll instance only watches the listening socket. */
    kdpfd = epoll_create(MAXEPOLLSIZE);
    len = sizeof(struct sockaddr_in);
    ev.events = EPOLLIN;
    ev.data.fd = listener;
    if (epoll_ctl(kdpfd, EPOLL_CTL_ADD, listener, &ev) < 0)
    {
        fprintf(stderr, "epoll set insertion error: fd=%d\n", listener);
        return -1;
    }
    else
        printf("listen socket add to epoll success\n");

    curfds = 1;
    while (1)
    {
        nfds = epoll_wait(kdpfd, events, MAXEPOLLSIZE, -1);
        if (nfds == -1)
        {
            perror("epoll_wait");
            break;
        }

        for (n = 0; n < nfds; ++n)
        {
            if (events[n].data.fd == listener)
            {
                new_fd = accept(listener, (struct sockaddr *)&their_addr, &len);
                if (new_fd < 0)
                {
                    perror("accept");
                    continue;
                }

                pthread_mutex_lock(&connet_count_lock);
                connect_count++;
                pthread_mutex_unlock(&connet_count_lock);

                setnonblocking(new_fd);
                ev.events = EPOLLIN | EPOLLET;
                ev.data.fd = new_fd;

                /* Round-robin: hand the new connection to the next worker's epoll instance. */
                fdind = fd_index % THREAD_NUMBER;

                if (epoll_ctl(fdpool[fdind], EPOLL_CTL_ADD, new_fd, &ev) < 0)
                {
                    fprintf(stderr, "add socket '%d' to epoll fail %s\n",
                            new_fd, strerror(errno));
                    return -1;
                }

                fd_index++;
            }
            else
            {
                printf("other event\n");
            }
        }
    }
    close(listener);

    return 0;
}
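
The server above never sends anything back; it only counts incoming messages and bubble-sorts the received bytes, so any TCP client that streams data will exercise it. Below is a minimal test-client sketch. The file name epoll_server.c, the build command, and the invocation ./epoll_server 50 7838 128 0.0.0.0 are my own assumptions for illustration, not something given in the original post.

/*
 * Test client sketch. Assumptions: the demo server was built as
 *     gcc -O2 -pthread -o epoll_server epoll_server.c
 * and started on the same machine as
 *     ./epoll_server 50 7838 128 0.0.0.0
 * The client connects to 127.0.0.1:7838 and streams small payloads
 * so the statistics thread has something to report.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
    struct sockaddr_in srv;
    char payload[64];
    int fd, i;

    fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd == -1)
    {
        perror("socket");
        return 1;
    }

    memset(&srv, 0, sizeof(srv));
    srv.sin_family = AF_INET;
    srv.sin_port = htons(7838);                    /* default port used by the demo server */
    srv.sin_addr.s_addr = inet_addr("127.0.0.1");

    if (connect(fd, (struct sockaddr *)&srv, sizeof(srv)) == -1)
    {
        perror("connect");
        close(fd);
        return 1;
    }

    memset(payload, 'x', sizeof(payload));
    for (i = 0; i < 1000; i++)                     /* 1000 messages of 64 bytes each */
    {
        if (send(fd, payload, sizeof(payload), 0) == -1)
        {
            perror("send");
            break;
        }
    }

    close(fd);
    return 0;
}
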
Trying out Tilera
The Tilera processor architecture
A while ago a colleague got hold of a server built around a Tilera processor, yes, the 64-core chip designed by the MIT people. I was very curious, so I logged in to try it out.
Tilera puts a thin software layer called a hypervisor on top of the hardware, runs Linux on top of that hypervisor, and then provides a toolchain of gcc/g++/gdb and so on. This Linux is modified, of course: code was added under the kernel's arch/tile/ directory. But even with that kernel code added, it can only run on the hypervisor, not directly on the Tilera hardware.
The machine we have uses a TILEPro processor: 32-bit, 64 cores, but each core runs at only 863 MHz, so multi-process/multi-threaded programs are the ones that benefit. I tried make -j60 and it did feel noticeably faster, but configure was absurdly slow.
While installing apache+php on the Tilera box I ran into a few small problems, mainly because this Linux environment is rather bare:
1. When building software from source, ./configure failed with "configure: error: C compiler cannot create executables". Fix:
export CC="gcc"
export CPP="gcc -E"
2. ./configure also fails to recognize this machine's processor type. Fix:
./configure --build=i386
Don't worry about this deceptive i386 option; for portable C code it does not cause any real problems.
My php install ultimately failed anyway, so I gave up and set up nginx for testing instead: 20 nginx processes, with load generated over a gigabit NIC, yet it only reached around 3000 QPS, which is clearly far too low. I asked Tilera's technical support; he replied that they had run memcached experiments (according to him, Facebook already uses Tilera machines dedicated to memcached) reaching 600,000 QPS, but multi-process nginx is slow for them too. They are also puzzled and are still investigating why.
It seems Tilera's software layer is still fairly thin for now. Let's wait a while: once linux-2.6.36 stabilizes and version 3.0 of Tilera's development tool suite becomes stable, we will take another look.
====== 2011.01.06 ======
Today Gu Ran from Tilera sent new results: using the same hardware (a single TILEPro) and software environment (MDE-2.1.0) as ours, with around 10 concurrent nginx processes, they reached nearly 20,000 QPS. That is somewhat worse than apache on x86, but much better than my test results.
Maybe my configuration was at fault; something to investigate later.