The source code of the C library (libhdfs) is located at:
hadoop-2.4.0-src/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs
Create a simple Makefile in that directory with the following contents (recipe lines must start with a real tab):

CC = gcc
DEFINES = -DG_ARCH_X86_64
CFLAGS += -fPIC -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -O3 -D_REENTRANT $(DEFINES)
CXXFLAGS += -pipe -O3 -D_REENTRANT $(DEFINES) -rdynamic
AR = ar cqs
LFLAGS = -rdynamic

OBJECTS = exception.o expect.o hdfs.o jni_helper.o native_mini_dfs.o
TARGET = libhdfs.a

# commands, don't change
CHK_DIR_EXISTS = test -d
DEL_FILE = rm -f

first: all

####### Implicit rules

.SUFFIXES: .o .c .cpp .cc .cxx .C .cu

.cpp.o:
	$(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"

.cc.o:
	$(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"

.cxx.o:
	$(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"

.C.o:
	$(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"

.c.o:
	$(CC) -c $(CFLAGS) $(INCPATH) -o "$@" "$<"

####### Build rules

all: $(TARGET)

$(TARGET): $(OBJECTS)
	$(AR) $(TARGET) $(OBJECTS)

clean:
	-$(DEL_FILE) $(OBJECTS) $(TARGET)
Running make then compiles the five source files and archives them into the static library:

gcc -c -fPIC -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -O3 -D_REENTRANT -DG_ARCH_X86_64 -o "exception.o" "exception.c"
gcc -c -fPIC -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -O3 -D_REENTRANT -DG_ARCH_X86_64 -o "expect.o" "expect.c"
gcc -c -fPIC -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -O3 -D_REENTRANT -DG_ARCH_X86_64 -o "hdfs.o" "hdfs.c"
gcc -c -fPIC -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -O3 -D_REENTRANT -DG_ARCH_X86_64 -o "jni_helper.o" "jni_helper.c"
gcc -c -fPIC -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -O3 -D_REENTRANT -DG_ARCH_X86_64 -o "native_mini_dfs.o" "native_mini_dfs.c"
ar cqs libhdfs.a exception.o expect.o hdfs.o jni_helper.o native_mini_dfs.o
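The resulting libhdfs.a exposes the C API declared in hdfs.h. As a quick orientation before the bundled tests, here is a minimal read-only client sketch; the file name read_sketch.c is made up for illustration, and error handling is kept to a minimum. Passing "default" to hdfsConnect makes libhdfs use the fs.defaultFS setting from the Hadoop configuration found on the CLASSPATH:

/* read_sketch.c -- illustrative libhdfs client, not part of the Hadoop tree */
#include "hdfs.h"      /* from the libhdfs source directory */
#include <fcntl.h>     /* O_RDONLY */
#include <stdio.h>

int main(void) {
    /* connect using fs.defaultFS from the config on the CLASSPATH */
    hdfsFS fs = hdfsConnect("default", 0);
    if (!fs) { fprintf(stderr, "hdfsConnect failed\n"); return 1; }

    hdfsFile in = hdfsOpenFile(fs, "/data/tmpfile", O_RDONLY, 0, 0, 0);
    if (!in) { fprintf(stderr, "hdfsOpenFile failed\n"); hdfsDisconnect(fs); return 1; }

    char buf[32];
    tSize n;
    while ((n = hdfsRead(fs, in, buf, sizeof(buf))) > 0)
        fwrite(buf, 1, (size_t)n, stdout);   /* dump the bytes we read */

    hdfsCloseFile(fs, in);
    hdfsDisconnect(fs);
    return 0;
}

It compiles with the same include and library flags as the test programs in the next step.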
The test sources live in hadoop-2.4.0-src/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test, and all of the test programs in that folder can be compiled in one go. Here is another simple Makefile for that:
LIBS = -L$(JAVA_HOME)/jre/lib/amd64/server/ -ljvm -L../ -lhdfs
INCPATH = -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/linux -I. -I..

all:
	gcc -o hdfs_ops test_libhdfs_ops.c $(INCPATH) $(LIBS)
	gcc -o hdfs_read test_libhdfs_read.c $(INCPATH) $(LIBS)
	gcc -o hdfs_write test_libhdfs_write.c $(INCPATH) $(LIBS)
	gcc -o hdfs_zerocopy test_libhdfs_zerocopy.c $(INCPATH) $(LIBS)
With JAVA_HOME pointing at the JDK, make expands this to:

gcc -o hdfs_ops test_libhdfs_ops.c -I/d0/data/lichao/software/java/jdk1.7.0_55/include -I/d0/data/lichao/software/java/jdk1.7.0_55/include/linux -I. -I.. -L/d0/data/lichao/software/java/jdk1.7.0_55/jre/lib/amd64/server/ -ljvm -L../ -lhdfs
gcc -o hdfs_read test_libhdfs_read.c -I/d0/data/lichao/software/java/jdk1.7.0_55/include -I/d0/data/lichao/software/java/jdk1.7.0_55/include/linux -I. -I.. -L/d0/data/lichao/software/java/jdk1.7.0_55/jre/lib/amd64/server/ -ljvm -L../ -lhdfs
gcc -o hdfs_write test_libhdfs_write.c -I/d0/data/lichao/software/java/jdk1.7.0_55/include -I/d0/data/lichao/software/java/jdk1.7.0_55/include/linux -I. -I.. -L/d0/data/lichao/software/java/jdk1.7.0_55/jre/lib/amd64/server/ -ljvm -L../ -lhdfs
gcc -o hdfs_zerocopy test_libhdfs_zerocopy.c -I/d0/data/lichao/software/java/jdk1.7.0_55/include -I/d0/data/lichao/software/java/jdk1.7.0_55/include/linux -I. -I.. -L/d0/data/lichao/software/java/jdk1.7.0_55/jre/lib/amd64/server/ -ljvm -L../ -lhdfs
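Before any of these binaries will run, the dynamic linker has to find libjvm.so and the embedded JVM has to find the Hadoop jars. Assuming a standard layout, something along these lines is needed (paths are illustrative):

export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/amd64/server:$LD_LIBRARY_PATH
export CLASSPATH=$(hadoop classpath)

One caveat: a JVM created through JNI does not expand wildcard classpath entries the way the java launcher does, so if hadoop classpath prints entries like .../lib/*, the jar list may need to be expanded explicitly (newer Hadoop releases provide hadoop classpath --glob for exactly this).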
Generate a small test file and load it into HDFS:

seq 1 10 > tmpfile
hadoop fs -mkdir /data
hadoop fs -put tmpfile /data
hadoop fs -cat /data/tmpfile
1
2
3
4
5
6
7
8
9
10

OK. Now run the freshly built hdfs_read program to exercise the 64-bit C interface to HDFS:
./hdfs_read /data/tmpfile 21 32
The two numeric arguments are the file size in bytes (nine lines of "digit\n" plus "10\n" gives 21) and the read buffer size (32); test_libhdfs_read takes <filename> <filesize> <buffersize>. The output is as follows:
log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
1
2
3
4
5
6
7
8
9
10

The log4j warnings just mean no log4j configuration file was found on the classpath; they are harmless here. The file contents come back intact, so the 64-bit build of libhdfs works.
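To round things off, the write path uses the same handful of calls with O_WRONLY. This is a hypothetical companion sketch (the file name write_sketch.c and target path /data/tmpfile2 are made up), not one of the bundled tests:

/* write_sketch.c -- illustrative write-side companion to the read sketch above */
#include "hdfs.h"
#include <fcntl.h>    /* O_WRONLY */
#include <stdio.h>
#include <string.h>

int main(void) {
    hdfsFS fs = hdfsConnect("default", 0);
    if (!fs) { fprintf(stderr, "hdfsConnect failed\n"); return 1; }

    /* 0, 0, 0 = default buffer size, replication and block size */
    hdfsFile out = hdfsOpenFile(fs, "/data/tmpfile2", O_WRONLY, 0, 0, 0);
    if (!out) { fprintf(stderr, "hdfsOpenFile failed\n"); hdfsDisconnect(fs); return 1; }

    const char *msg = "hello from libhdfs\n";
    if (hdfsWrite(fs, out, msg, (tSize)strlen(msg)) < 0)
        fprintf(stderr, "hdfsWrite failed\n");
    hdfsFlush(fs, out);          /* push buffered bytes to the datanodes */

    hdfsCloseFile(fs, out);
    hdfsDisconnect(fs);
    return 0;
}

The bundled hdfs_write test exercises these same calls, just with configurable file and buffer sizes.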