Computing the minimal static jar dependencies

1. First, run the open-source JarAnalyzer tool's rundotsummary.bat against the jars in the target directory to generate a dependency graph file, hehe.grph.

http://www.kirkk.com/main/zip/JarAnalyzer-1.2.zip
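
For example (an illustrative invocation; it assumes the script takes the directory of jars and the output file name as its two arguments):

rundotsummary.bat lib hehe.grph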

For example, here is the graph generated for the wordcount example from hadoop 1.1.2:

digraph G {
    commons_beanutils_1_7_0 -> commons_collections_3_2_1;
    commons_beanutils_1_7_0 -> commons_logging_1_1_1;
    commons_configuration_1_6 -> commons_lang_2_4;
    commons_configuration_1_6 -> commons_logging_1_1_1;
    commons_configuration_1_6 -> commons_beanutils_1_7_0;
    commons_configuration_1_6 -> commons_collections_3_2_1;
    commons_configuration_1_6 -> commons_digester_1_8;
    commons_configuration_1_6 -> commons_codec_1_4;
    commons_digester_1_8 -> commons_beanutils_1_7_0;
    commons_digester_1_8 -> commons_logging_1_1_1;
    commons_httpclient_3_0_1 -> commons_logging_1_1_1;
    commons_httpclient_3_0_1 -> commons_codec_1_4;
    commons_logging_1_1_1 -> log4j_1_2_15;
    hadoop_core_1_1_2 -> jackson_core_asl_1_8_8;
    hadoop_core_1_1_2 -> commons_logging_1_1_1;
    hadoop_core_1_1_2 -> commons_io_2_1;
    hadoop_core_1_1_2 -> commons_net_3_1;
    hadoop_core_1_1_2 -> jetty_util_6_1_26;
    hadoop_core_1_1_2 -> jetty_6_1_26;
    hadoop_core_1_1_2 -> commons_daemon_1_0_1;
    hadoop_core_1_1_2 -> jasper_runtime_5_5_12;
    hadoop_core_1_1_2 -> commons_cli_1_2;
    hadoop_core_1_1_2 -> commons_codec_1_4;
    hadoop_core_1_1_2 -> log4j_1_2_15;
    hadoop_core_1_1_2 -> jackson_mapper_asl_1_8_8;
    hadoop_core_1_1_2 -> commons_httpclient_3_0_1;
    hadoop_core_1_1_2 -> commons_lang_2_4;
    hadoop_core_1_1_2 -> commons_configuration_1_6;
    hadoop_core_1_1_2 -> commons_math_2_1;
    hadoop_core_1_1_2 -> slf4j_api_1_4_3;
    jackson_mapper_asl_1_8_8 -> jackson_core_asl_1_8_8;
    jasper_runtime_5_5_12 -> commons_logging_1_1_1;
    jasper_runtime_5_5_12 -> commons_el_1_0;
    jetty_6_1_26 -> jetty_util_6_1_26;
    jetty_util_6_1_26 -> slf4j_api_1_4_3;
    slf4j_api_1_4_3 -> slf4j_log4j12_1_4_3;
    slf4j_log4j12_1_4_3 -> slf4j_api_1_4_3;
    slf4j_log4j12_1_4_3 -> log4j_1_2_15;
}
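
The generated .grph file is ordinary Graphviz DOT, so you can optionally render it as an image to eyeball the graph (assuming Graphviz is installed):

dot -Tpng hehe.grph -o hehe.png

2. The following program parses the .grph file and, starting from a chosen root jar, collects the transitive closure of its dependencies: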
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;

// Analyzes the dependency graph (.grph file) generated by JarAnalyzer
public class GetMinJar {

	private static HashMap<String, LinkedHashSet<String>> lines=new HashMap<>();

	public static void main(String[] args) {

		try {
			String line;
			// defaults, used when no arguments are supplied
			String rootData="hadoop_core_1_1_2";
			String grphPath="hehe.grph";
			if(args.length==2)
			{
				rootData=args[0];
				grphPath=args[1];
			}
			else
			{
				System.out.println("usage: java GetMinJar <root-jar> <grph-file>, e.g. java GetMinJar hadoop_core_1_1_2 hehe.grph");
				System.out.println("falling back to defaults: "+rootData+" "+grphPath);
			}

			FileReader fileReader=new FileReader(grphPath);
			BufferedReader bufferedReader=new BufferedReader(fileReader);

			while((line=bufferedReader.readLine())!=null)
			{
				// each edge line looks like "a -> b;"; split it into source and target node
				String[] datas=line.split("->|;");
				if(datas.length>1)
				{
					String key=datas[0].trim();
					LinkedHashSet<String> val=lines.get(key);
					if(val==null)
					{
						val=new LinkedHashSet<String>();
						lines.put(key,val);
					}
					val.add(datas[1].trim());
				}
			}
			bufferedReader.close();
			fileReader.close();
			LinkedHashMap<String, String> results=new LinkedHashMap<>();
			collectDependencies(rootData,results);

			System.out.println(rootData.replaceAll("_", "-")+".jar depends on "+results.size()+" jars ===========");


			for(Map.Entry<String, String> entry:results.entrySet() )
			{
				String key=entry.getKey();
				// JarAnalyzer replaced both '-' and '.' with '_' in node names, so
				// version dots print as hyphens here (e.g. commons-beanutils-1-7-0.jar);
				// map the version separators back to dots to get the real file name
				key=key.replaceAll("_", "-");
				System.out.println(key+".jar");
			}

		} catch (Exception e) {
			e.printStackTrace();
		}

	}


	// Depth-first walk of the dependency graph from 'data', collecting every
	// transitively reachable jar into 'results'.
	private static void collectDependencies(String data,LinkedHashMap<String, String> results) {

		LinkedHashSet<String> val=lines.get(data);
		if(val!=null)
		{
			for (String dep : val) {
				// skip already-visited nodes; this also guards against infinite
				// recursion when the graph contains cycles
				// (e.g. slf4j_api_1_4_3 <-> slf4j_log4j12_1_4_3)
				if(results.containsKey(dep))
				{
					continue;
				}
				results.put(dep,"");
				collectDependencies(dep,results);
			}
		}
	}

}
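
To try it against the graph above (assuming GetMinJar.java sits in the same directory as hehe.grph):

javac GetMinJar.java
java GetMinJar hadoop_core_1_1_2 hehe.grph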

Note that this yields only the minimal set of statically referenced jars; jars that are loaded dynamically only show up once the program actually runs (see the snippet below). If you use this to strip unused jars, test the result carefully.
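
A reflective load like the following, for instance, leaves no static reference in the bytecode, so JarAnalyzer cannot see it (a hypothetical snippet; the class name is only illustrative):

// resolved by name at runtime; invisible to static dependency analysis
Class<?> codec = Class.forName("org.apache.commons.codec.binary.Base64");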

The minimal jar dependencies for the hadoop 1.1.2 wordcount example are:

commons-beanutils-1.7.0.jar
commons-cli-1.2.jar
commons-codec-1.4.jar
commons-collections-3.2.1.jar
commons-configuration-1.6.jar
commons-daemon-1.0.1.jar
commons-digester-1.8.jar
commons-el-1.0.jar
commons-httpclient-3.0.1.jar
commons-io-2.1.jar
commons-lang-2.4.jar
commons-logging-1.1.1.jar
commons-math-2.1.jar
commons-net-3.1.jar
hadoop-core-1.1.2.jar
jackson-core-asl-1.8.8.jar
jackson-mapper-asl-1.8.8.jar
jasper-runtime-5.5.12.jar
jetty-6.1.26.jar
jetty-util-6.1.26.jar
log4j-1.2.15.jar
slf4j-api-1.4.3.jar
slf4j-log4j12-1.4.3.jar
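
One way to test carefully: put only the jars above on the classpath and rerun the job, for example (a sketch; the job jar, main class, and arguments are placeholders, and on Windows the classpath separator is ';' instead of ':'):

java -cp wordcount.jar:hadoop-core-1.1.2.jar:<the remaining jars above> WordCount <input> <output>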

