First of all, the mjpg-streamer source tree consists of the following directories:
|__ .deps
|__ doc
|__ mjpeg-client             // the client programs used to view the stream
|__ mjpg-streamer            // USB camera capture and transmission (the directory analyzed below)
|__ mjpg-streamer-experiment
|__ udp-client
|__ uvc-streamer
plugins directory: the worker functions that do the actual USB camera data capture and transmission.
scripts directory: does not seem to be of much use.
www directory: mainly adds some functionality to the HTML pages when the stream is viewed in a browser.
mjpg-streamer.c: this file parses the command-line arguments and then starts the threads that run the relevant worker functions.
Now let's look at the source code in mjpg-streamer.c:
int main(int argc, char *argv[])
{
    char *input = "input_uvc.so --resolution 640x480 --fps 5 --device /dev/video0";
    char *output[MAX_OUTPUT_PLUGINS]; /* MAX_OUTPUT_PLUGINS is 10 */
    int daemon = 0, i;
    size_t tmp = 0;

    output[0] = "output_http.so --port 8080";
    global.outcnt = 0;
    global.control = control;

    /* parameter parsing */
    while(1) {
        int option_index = 0, c = 0;
        static struct option long_options[] = \
        {
            {"h", no_argument, 0, 0},
            {"help", no_argument, 0, 0},
            {"i", required_argument, 0, 0},
            {"input", required_argument, 0, 0},
            {"o", required_argument, 0, 0},
            {"output", required_argument, 0, 0},
            {"v", no_argument, 0, 0},
            {"version", no_argument, 0, 0},
            {"b", no_argument, 0, 0},
            {"background", no_argument, 0, 0},
            {0, 0, 0, 0}
        };

        c = getopt_long_only(argc, argv, "", long_options, &option_index);

        /* no more options to parse */
        if (c == -1) break;
The key function here is getopt_long_only(), which parses the command-line options, that is, it picks the -h, -i, -o, -v and -b arguments out of argv[]. For the details of how to use it, see the getopt_long_only(3) man page.
        switch (option_index) {
            /* h, help */
            case 0:
            case 1:
                help(argv[0]);
                return 0;
                break;

            /* i, input */
            case 2:
            case 3:
                input = strdup(optarg);
                break;

            /* o, output */
            case 4:
            case 5:
                output[global.outcnt++] = strdup(optarg);
                break;

            /* v, version */
            case 6:
            case 7:
                printf("MJPG Streamer Version: %s\n" \
                       "Compilation Date.....: %s\n" \
                       "Compilation Time.....: %s\n", SOURCE_VERSION, __DATE__, __TIME__);
                return 0;
                break;

            /* b, background */
            case 8:
            case 9:
                daemon = 1;
                break;

            default:
                help(argv[0]);
                return 0;
        }
    }
option_index holds the index of the matching entry in the long_options table above, and strdup(optarg) copies the argument that follows the flag. For example, when main() is invoked as ./mjpg_streamer -i "./input_uvc.so", execution reaches case 3: and input is set to "./input_uvc.so".
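To see this mechanism in isolation, here is a small self-contained sketch of the same getopt_long_only() / option_index pattern. It is a stripped-down example for illustration only, not mjpg-streamer's code; it handles just -i/--input and -o/--output.

/* Minimal, self-contained sketch of the getopt_long_only() pattern used above. */
#include <getopt.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[])
{
    char *input = NULL, *output = NULL;

    while (1) {
        int option_index = 0, c;
        static struct option long_options[] = {
            {"i",      required_argument, 0, 0},
            {"input",  required_argument, 0, 0},
            {"o",      required_argument, 0, 0},
            {"output", required_argument, 0, 0},
            {0, 0, 0, 0}
        };

        c = getopt_long_only(argc, argv, "", long_options, &option_index);
        if (c == -1)          /* no more options on the command line */
            break;
        if (c == '?')         /* unrecognized option */
            return 1;

        /* option_index is the row of long_options that matched */
        switch (option_index) {
        case 0:
        case 1:
            input = strdup(optarg);   /* optarg points at the option's argument */
            break;
        case 2:
        case 3:
            output = strdup(optarg);
            break;
        }
    }

    printf("input : %s\noutput: %s\n", input ? input : "(none)",
                                       output ? output : "(none)");
    return 0;
}

Compile it as, say, gcc -o demo demo.c and run ./demo -i "./input_uvc.so"; it prints input : ./input_uvc.so, the same case 3 path described above.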
    /* open input plugin */
    tmp = (size_t)(strchr(input, ' ') - input);
    global.in.plugin = (tmp > 0) ? strndup(input, tmp) : strdup(input);
    global.in.handle = dlopen(global.in.plugin, RTLD_LAZY);
    if ( !global.in.handle ) {
        LOG("ERROR: could not find input plugin\n");
        LOG("       Perhaps you want to adjust the search path with:\n");
        LOG("       # export LD_LIBRARY_PATH=/path/to/plugin/folder\n");
        LOG("       dlopen: %s\n", dlerror() );
        closelog();
        exit(EXIT_FAILURE);
    }

    global.in.init = dlsym(global.in.handle, "input_init");
    if ( global.in.init == NULL ) {
        LOG("%s\n", dlerror());
        exit(EXIT_FAILURE);
    }

    global.in.stop = dlsym(global.in.handle, "input_stop");
    if ( global.in.stop == NULL ) {
        LOG("%s\n", dlerror());
        exit(EXIT_FAILURE);
    }

    global.in.run = dlsym(global.in.handle, "input_run");
    if ( global.in.run == NULL ) {
        LOG("%s\n", dlerror());
        exit(EXIT_FAILURE);
    }
dlopen() opens the *.so plugin and dlsym() looks up the addresses of the relevant functions inside it; the functions are then called later through these pointers. The intent is obvious by now: when you run ./mjpg_streamer -i "./input_uvc.so", the program will call input_init() from input_uvc.so to initialize the input device. Open plugins\input_uvc\input_uvc.c and check: input_init() is indeed there, so the reading above is correct.
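For reference, the dlopen()/dlsym() pattern on its own looks roughly like this. Note that my_plugin.so and plugin_init are made-up names used purely for illustration; build with gcc demo.c -ldl.

/* Minimal sketch of the dlopen()/dlsym() pattern: load a shared object at run
 * time and look up a function address in it. */
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>

typedef int (*init_fn)(void);

int main(void)
{
    void *handle = dlopen("./my_plugin.so", RTLD_LAZY);
    if (!handle) {
        fprintf(stderr, "dlopen: %s\n", dlerror());
        exit(EXIT_FAILURE);
    }

    /* dlsym() only returns the symbol's address; the call happens afterwards */
    init_fn plugin_init = (init_fn)dlsym(handle, "plugin_init");
    if (!plugin_init) {
        fprintf(stderr, "dlsym: %s\n", dlerror());
        dlclose(handle);
        exit(EXIT_FAILURE);
    }

    int ret = plugin_init();   /* invoke the plugin's entry point */
    printf("plugin_init() returned %d\n", ret);

    dlclose(handle);
    return 0;
}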
Now that the input is initialized, what about the output? No rush, let's keep reading:
    /* open output plugin */
    for (i = 0; i < global.outcnt; i++) {
        tmp = (size_t)(strchr(output[i], ' ') - output[i]);
        global.out[i].plugin = (tmp > 0) ? strndup(output[i], tmp) : strdup(output[i]);
        global.out[i].handle = dlopen(global.out[i].plugin, RTLD_LAZY);
        if ( !global.out[i].handle ) {
            LOG("ERROR: could not find output plugin %s\n", global.out[i].plugin);
            LOG("       Perhaps you want to adjust the search path with:\n");
            LOG("       # export LD_LIBRARY_PATH=/path/to/plugin/folder\n");
            LOG("       dlopen: %s\n", dlerror() );
            closelog();
            exit(EXIT_FAILURE);
        }
        global.out[i].init = dlsym(global.out[i].handle, "output_init");
        if ( global.out[i].init == NULL ) {
            LOG("%s\n", dlerror());
            exit(EXIT_FAILURE);
        }

        global.out[i].stop = dlsym(global.out[i].handle, "output_stop");
        if ( global.out[i].stop == NULL ) {
            LOG("%s\n", dlerror());
            exit(EXIT_FAILURE);
        }
        global.out[i].run = dlsym(global.out[i].handle, "output_run");
        if ( global.out[i].run == NULL ) {
            LOG("%s\n", dlerror());
            exit(EXIT_FAILURE);
        }
    }
There is no need to walk through this block; it repeats the input-plugin loading pattern above for each output plugin.
    if ( global.in.run() ) {
        LOG("can not run input plugin\n");
        closelog();
        return 1;
    }

    /* start every output plugin that was loaded */
    for (i = 0; i < global.outcnt; i++) {
        global.out[i].run(global.out[i].param.id);
    }
And that is the end of mjpg-streamer.c. It feels a bit unsatisfying, so let's take a look inside input_uvc.c and output_http.c:
input_uvc.c(plugins\input_uvc\)
int input_init(input_parameter *param)
This function looks rather long, but the code only sets up the USB camera: pixel format, frames, requesting buffers, queueing buffers and so on. The real work happens in init_videoIn(videoIn, dev, width, height, fps, format, 1); have a look at it if you are interested.
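init_videoIn() itself is not reproduced here, but the usual V4L2 sequence behind this kind of setup (set the format, request and mmap the buffers, queue them, start streaming) looks roughly like the sketch below. It is a generic outline with error handling trimmed and an illustrative setup_camera() name, not mjpg-streamer's actual code; the frame rate would additionally be negotiated with VIDIOC_S_PARM.

/* Rough sketch of the usual V4L2 setup flow this kind of init performs. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int setup_camera(const char *dev, int width, int height)
{
    int fd = open(dev, O_RDWR);
    if (fd < 0) return -1;

    /* 1. negotiate the capture format (MJPEG frames, width x height) */
    struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = width;
    fmt.fmt.pix.height = height;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
    if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) return -1;

    /* 2. ask the driver for memory-mapped capture buffers */
    struct v4l2_requestbuffers req;
    memset(&req, 0, sizeof(req));
    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0) return -1;

    /* 3. map each buffer and queue it so the driver can fill it */
    for (unsigned int i = 0; i < req.count; i++) {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0) return -1;
        void *mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, buf.m.offset);
        if (mem == MAP_FAILED) return -1;
        if (ioctl(fd, VIDIOC_QBUF, &buf) < 0) return -1;
    }

    /* 4. start streaming; frames are then fetched with VIDIOC_DQBUF */
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(fd, VIDIOC_STREAMON, &type) < 0) return -1;
    return fd;
}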
int input_stop(void) {
    DBG("will cancel input thread\n");
    pthread_cancel(cam);
    return 0;
}
Stopping the input is even simpler: just cancel the capture thread with pthread_cancel() and you are done, so input_run() must be the one that creates the thread.
int input_run(void) {
    pglobal->buf = malloc(videoIn->framesizeIn);
    if (pglobal->buf == NULL) {
        fprintf(stderr, "could not allocate memory\n");
        exit(EXIT_FAILURE);
    }

    pthread_create(&cam, 0, cam_thread, NULL);
    pthread_detach(cam);

    return 0;
}
Guessed right, though no points for that. The remaining code in this file is barely relevant here, so I will not go into it. Next, output_http.c:
output_http.c(plugins\output_http\)
int output_init(output_parameter *param) {
    servers[param->id].id = param->id;
    servers[param->id].pglobal = param->global;
    servers[param->id].conf.port = port;
    servers[param->id].conf.credentials = credentials;
    servers[param->id].conf.www_folder = www_folder;
    servers[param->id].conf.nocommands = nocommands;
    return 0;
}
int output_stop(int id) {
    DBG("will cancel server thread #%02d\n", id);
    pthread_cancel(servers[id].threadID);
    return 0;
}
int output_run(int id) {
    DBG("launching server thread #%02d\n", id);
    /* create thread and pass context to thread function */
    pthread_create(&(servers[id].threadID), NULL, server_thread, &(servers[id]));
    pthread_detach(servers[id].threadID);
    return 0;
}
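server_thread() itself is not shown above, but conceptually it is a standard TCP accept loop on the configured port that answers each HTTP request; for a live stream the response carries one JPEG frame after another. The sketch below is an assumption about that general shape, not output_http.c's actual code, with a stub reply instead of real streaming; build it with -lpthread.

/* Minimal sketch of the accept-loop pattern a server thread like this follows:
 * bind to the configured port, accept connections, answer each request. */
#include <netinet/in.h>
#include <pthread.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void *server_thread(void *arg)
{
    int port = *(int *)arg;

    int sd = socket(AF_INET, SOCK_STREAM, 0);
    int on = 1;
    setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family      = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port        = htons(port);

    bind(sd, (struct sockaddr *)&addr, sizeof(addr));
    listen(sd, 10);

    while (1) {
        int client = accept(sd, NULL, NULL);
        if (client < 0)
            continue;
        /* a real HTTP streamer would parse the request here and then keep
         * writing JPEG frames; this stub just sends a fixed reply */
        const char *resp = "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\nhello\n";
        write(client, resp, strlen(resp));
        close(client);
    }
    return NULL;
}

int main(void)
{
    int port = 8080;
    pthread_t tid;

    pthread_create(&tid, NULL, server_thread, &port);
    pthread_join(tid, NULL);   /* output_run() detaches the thread instead of joining */
    return 0;
}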