Debugging environment

Debugging environment

The debugging environment I used:

  • IntelliJ IDEA
  • Sqoop 1.99.6

Code used for debugging

/**
 * Created by zj on 16-6-24.
 */
import org.apache.sqoop.client.SqoopClient;
import org.apache.sqoop.model.*;
import org.apache.sqoop.validation.*;   //for status class
import org.apache.sqoop.submission.counter.*;

import java.util.Collections;
import java.util.*;

public class DT {

    // A helper function to iterate over the list of validation messages, e.g.
    // printMessage(link.getConnectorLinkConfig().getConfigs());
    private static void printMessage(List<MConfig> configs) {
        for (MConfig config : configs) {
            List<MInput<?>> inputlist = config.getInputs();
            if (config.getValidationMessages() != null) {
                // print every validation message
                for (Message message : config.getValidationMessages()) {
                    System.out.println("Config validation message: " + message.getMessage());
                }
            }
            for (MInput<?> minput : inputlist) {
                if (minput.getValidationStatus() == Status.WARNING) {
                    for (Message message : minput.getValidationMessages()) {
                        System.out.println("Config Input Validation Warning: " + message.getMessage());
                    }
                } else if (minput.getValidationStatus() == Status.ERROR) {
                    for (Message message : minput.getValidationMessages()) {
                        System.out.println("Config Input Validation Error: " + message.getMessage());
                    }
                }
            }
        }
    }

    public static void main(String [] args){
        //Initialization
        String url = "http://localhost:12000/sqoop/";
        SqoopClient client = new SqoopClient(url);


        Collection<MConnector> connectorCollections = client.getConnectors();
        for(MConnector mc: connectorCollections) {
            System.out.println(mc.getUniqueName());
            System.out.println(mc.getClassName());
            System.out.println(mc.getPersistenceId());
        }

        /* I created the link before, and the link object is already persisted in the Derby database.
        // create a placeholder for link
        long connectorId = 1;
        MLink link = client.createLink(connectorId);
        link.setName("mysql_link");
        link.setCreationUser("zjlearn");
        MLinkConfig linkConfig = link.getConnectorLinkConfig();

        // fill in the link config values
        linkConfig.getStringInput("linkConfig.connectionString").setValue("jdbc:mysql://localhost/my");
        linkConfig.getStringInput("linkConfig.jdbcDriver").setValue("com.mysql.jdbc.Driver");
        linkConfig.getStringInput("linkConfig.username").setValue("root");
        linkConfig.getStringInput("linkConfig.password").setValue("root");



        // save the link object that was filled
        Status status = client.saveLink(link);
        if(status.canProceed()) {
            System.out.println("Created Link with Link Id : " + link.getPersistenceId());
        } else {
            System.out.println("Something went wrong creating the link");
        }
        //create the link2
        long connectorId2 = 2;
        MLink link2 = client.createLink(connectorId2);
        link2.setName("hdfs_link");
        link2.setCreationUser("zjlearn");
        MLinkConfig linkConfig2 = link2.getConnectorLinkConfig();

        // fill in the link config values for the hdfs connector


        // save the link object that was filled
        Status status2 = client.saveLink(link2);
        if(status2.canProceed()) {
            System.out.println("Created Link with Link Id : " + link2.getPersistenceId());
        } else {
            System.out.println("Something went wrong creating the link");
        }
        */



        //create  the job
        //Creating dummy job object
        long fromLinkId = 1;// link for jdbc connector
        long toLinkId = 2; // link for HDFS connector
        MJob job = client.createJob(fromLinkId, toLinkId);
        job.setName("mysql_hafs_dt");
        job.setCreationUser("zjlearn");
        // set the "FROM" link job config values
        MFromConfig fromJobConfig = job.getFromJobConfig();
        fromJobConfig.getStringInput("fromJobConfig.schemaName").setValue("sqoop");
        fromJobConfig.getStringInput("fromJobConfig.tableName").setValue("sqoop");
        fromJobConfig.getStringInput("fromJobConfig.partitionColumn").setValue("id");
        // set the "TO" link job config values
        MToConfig toJobConfig = job.getToJobConfig();
        toJobConfig.getStringInput("toJobConfig.outputDirectory").setValue("/usr/tmp");
        // set the driver config values
        MDriverConfig driverConfig = job.getDriverConfig();
        driverConfig.getStringInput("throttlingConfig.numExtractors").setValue("3");

        Status status3 = client.saveJob(job);
        if(status3.canProceed()) {
            System.out.println("Created Job with Job Id: "+ job.getPersistenceId());
        } else {
            System.out.println("Something went wrong creating the job");
        }

        //start the job
        long jobId= job.getPersistenceId();
        MSubmission Jobsubmission = client.startJob(jobId);
        System.out.println("Job Submission Status : " + Jobsubmission.getStatus());
        if(Jobsubmission.getStatus().isRunning() && Jobsubmission.getProgress() != -1) {
            System.out.println("Progress : " + String.format("%.2f %%", Jobsubmission.getProgress() * 100));
        }
        System.out.println("Hadoop job id :" + Jobsubmission.getExternalJobId());  //the method has change  getExternalJobId
        System.out.println("Job link : " + Jobsubmission.getExternalLink());
        Counters counters = Jobsubmission.getCounters();
        if(counters != null) {
            System.out.println("Counters:");
            for(CounterGroup group : counters) {
                System.out.print("\t");
                System.out.println(group.getName());
                for(Counter counter : group) {
                    System.out.print("\t\t");
                    System.out.print(counter.getName());
                    System.out.print(": ");
                    System.out.println(counter.getValue());
                }
            }
        }
        // there is no getExceptionInfo method on MSubmission in sqoop2, but there is a getError method
        if(Jobsubmission.getError() != null) {
            System.out.println("Exception info : " +Jobsubmission.getError());
        }

        //Check job status for a running job
        MSubmission jobStatusSubmission = client.getJobStatus(jobId);
        if(jobStatusSubmission.getStatus().isRunning() && jobStatusSubmission.getProgress() != -1) {
            System.out.println("Progress : " + String.format("%.2f %%", jobStatusSubmission.getProgress() * 100));
        }

        // Stop a running job (the example on the official web page contains an error here)
        client.stopJob(jobId);

        // TODO: get the execution result

    }
}
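The printMessage helper above is defined but never called in main(). Below is a minimal sketch of how it could be wired in, intended to sit right after the Status status3 = client.saveJob(job); line; this is my addition rather than part of the original listing, and it simply surfaces the server-side validation messages when the job cannot be saved:

    // Hypothetical follow-up to the saveJob call: if the job config did not
    // validate, print the messages for every config group of the job.
    if (!status3.canProceed()) {
        printMessage(job.getFromJobConfig().getConfigs());
        printMessage(job.getToJobConfig().getConfigs());
        printMessage(job.getDriverConfig().getConfigs());
    }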

Maven dependencies

Below are the dependencies used by the project:


    <dependency>
        <groupId>org.apache.sqoop</groupId>
        <artifactId>sqoop-client</artifactId>
        <version>1.99.6</version>
    </dependency>
    <dependency>
        <groupId>org.apache.sqoop</groupId>
        <artifactId>sqoop-server</artifactId>
        <version>1.99.6</version>
    </dependency>

How to debug the code

This part is mainly based on the following references:

  • http://blog.csdn.net/xichenguan/article/details/39228301
  • http://qifuguang.me/2015/09/18/IntelliJ%E8%BF%9C%E7%A8%8B%E8%B0%83%E8%AF%95%E6%95%99%E7%A8%8B/
  • http://linux.it.net.cn/e/server/Tomcat/2015/0202/12850.html

Sqoop2 has a client-server architecture. The client side consists of sqoop-shell and sqoop-client; the server side is sqoop-server, a web application deployed under Tomcat and made up of several servlets.
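As a small illustration of that client-server split, the sketch below only talks to the server's REST endpoint; it assumes the getLinks() and getJobs() methods of the 1.99.x SqoopClient API and simply lists the links and jobs the server already knows about:

    import org.apache.sqoop.client.SqoopClient;
    import org.apache.sqoop.model.MJob;
    import org.apache.sqoop.model.MLink;

    public class ListServerObjects {
        public static void main(String[] args) {
            // The client only needs the server's REST URL; every call below is an HTTP request
            // handled by the servlets inside the sqoop-server webapp.
            SqoopClient client = new SqoopClient("http://localhost:12000/sqoop/");
            for (MLink link : client.getLinks()) {
                System.out.println("link " + link.getPersistenceId() + ": " + link.getName());
            }
            for (MJob job : client.getJobs()) {
                System.out.println("job " + job.getPersistenceId() + ": " + job.getName());
            }
        }
    }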

Debugging sqoop-shell

Debugging sqoop-shell simply means adding debug parameters when the sqoop client's main method is launched.

sqoop-shell is started with sqoop.sh client, so the place to start is the sqoop.sh script, where you will find this section:

  client)
    # Build class path with full path to each library
    for f in $CLIENT_LIB/*.jar; do
      CLASSPATH="${CLASSPATH}:$f"
    done

    EXEC_JAVA='java'
    if [ -n "${JAVA_HOME}" ] ; then
        EXEC_JAVA="${JAVA_HOME}/bin/java"
    fi
    ${EXEC_JAVA} -classpath ${CLASSPATH} org.apache.sqoop.shell.SqoopShell $2
    ;;

This is the code that runs when you execute the sqoop.sh client command. In it you can see the line:

${EXEC_JAVA} -classpath ${CLASSPATH} org.apache.sqoop.shell.SqoopShell $2

This is where the sqoop client's main method is launched. Add the following to it:

-Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,address=8199,server=y,suspend=n

so that the line becomes:

${EXEC_JAVA} -classpath ${CLASSPATH} -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,address=8199,server=y,suspend=n org.apache.sqoop.shell.SqoopShell $2

** Note: keep it all on one line; do not insert a line break. The server option does not refer to an IP address. **

Parameter descriptions:
-Xdebug: enable debugging
-Xrunjdwp: load the JVM's JPDA reference implementation library
transport=dt_socket: connect over a socket; the alternative dt_shmem connects to the debug server via shared memory
address=8199: the port the debug server listens on
server=y: whether this JVM acts as the debug server; n means it acts as the client
suspend=n: whether to suspend at startup; y pauses the JVM on startup, which is convenient for debugging the startup process
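One quick way to confirm that the JVM behind the sqoop shell really picked up these options (my own check using only the standard JDK, not from the original post) is to print the JVM's input arguments from any code running in that process; the -Xrunjdwp (or -agentlib:jdwp) entry should show up:

    import java.lang.management.ManagementFactory;

    public class ShowJvmArgs {
        public static void main(String[] args) {
            // Prints every option the JVM was started with, including debug agent flags.
            for (String arg : ManagementFactory.getRuntimeMXBean().getInputArguments()) {
                System.out.println(arg);
            }
        }
    }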

The exact meaning of these parameters is covered in the documentation; IBM developerWorks has a series of articles dedicated to the Java debugging architecture, 《深入 Java 调试体系》 ("Inside the Java Debugging Architecture"), which is detailed and well worth reading, and will fill a gap in many people's Java knowledge.

With this in place, when you run sqoop.sh client to start the sqoop shell, the output will include something like:

Listening for transport dt_socket at address: 8199

In your IDE, for example Eclipse, choose remote debugging (in Eclipse this is Remote Java Application). You mainly need to fill in the network address of the machine running the program and the port number, which in this example is 8199.

After starting the debug session on the Eclipse side and setting breakpoints, operate the sqoop shell running on the Sqoop machine; when execution hits a breakpoint it stops, and debugging on the Eclipse side works just like debugging a local program.

Debugging sqoop-server

Because sqoop-server is just a Java web application, setting up remote debugging for sqoop-server means setting up remote debugging for Tomcat. Running Tomcat's catalina.sh shows that Tomcat already provides a jpda option (for sqoop2, the catalina.sh file is under SQOOP_HOME/server/bin; running it prints the following):

Usage: catalina.sh ( commands ... )
commands:
  debug             Start Catalina in a debugger
  debug -security   Debug Catalina with a security manager
  jpda start        Start Catalina under JPDA debugger
  run               Start Catalina in the current window
  run -security     Start in the current window with security manager
  start             Start Catalina in a separate window
  start -security   Start in a separate window with security manager
  stop              Stop Catalina, waiting up to 5 seconds for the process to end
  stop n            Stop Catalina, waiting up to n seconds for the process to end
  stop -force       Stop Catalina, wait up to 5 seconds and then use kill -KILL if still running
  stop n -force     Stop Catalina, wait up to n seconds and then use kill -KILL if still running
  version           What version of tomcat are you running?
Note: Waiting for the process to end and use of the -force option require that $CATALINA_PID is defined

This embedded Tomcat is started by the sqoop.sh script with the following command:

sqoop.sh server start

So look at the sqoop.sh script and find the following section:

 server)
    if [ $# = 1 ]; then
      echo "Usage: sqoop.sh server "
      exit
    fi
    actionCmd=$2

    source ${BASEDIR}/bin/sqoop-sys.sh
    setup_catalina_opts

    # There seems to be a bug in catalina.sh whereby catalina.sh doesn't respect
    # CATALINA_OPTS when stopping the tomcat server. Consequently, we have to hack around
    # by specifying the CATALINA_OPTS properties in JAVA_OPTS variable
    if [ "$actionCmd" == "stop" ]; then
      export JAVA_OPTS="$JAVA_OPTS $CATALINA_OPTS"
    fi

    # Remove the first 2 command line arguments (server and action command (start/stop)) so we can pass
    # the rest to catalina.sh script
    shift
    shift

    $CATALINA_BIN/catalina.sh $actionCmd "$@"
    ;;

From the sqoop.sh server start command, actionCmd is start; in other words, sqoop.sh server start ultimately invokes catalina.sh with start, i.e. catalina.sh start. What we want to run instead is:

catalina.sh jpda start

So take the original line

$CATALINA_BIN/catalina.sh $actionCmd "$@"

and insert jpda directly into it; the final line is:

$CATALINA_BIN/catalina.sh jpda $actionCmd "$@"

When we set up JPDA for the sqoop shell above, we added the parameters to the JVM at startup; but the help printed by catalina.sh shows no place to pass such parameters, which suggests they come from environment variables set in a configuration file or script.

Sure enough, catalina.sh contains the following configuration:

if [ "$1" = "jpda" ] ; then
  if [ -z "$JPDA_TRANSPORT" ]; then
    JPDA_TRANSPORT="dt_socket"
  fi
  if [ -z "$JPDA_ADDRESS" ]; then
    JPDA_ADDRESS="8000"
  fi
  if [ -z "$JPDA_SUSPEND" ]; then
    JPDA_SUSPEND="n"
  fi
  if [ -z "$JPDA_OPTS" ]; then
    JPDA_OPTS="-agentlib:jdwp=transport=$JPDA_TRANSPORT,address=$JPDA_ADDRESS,server=y,suspend=$JPDA_SUSPEND"
  fi
  CATALINA_OPTS="$CATALINA_OPTS $JPDA_OPTS"
  shift
fi

These are the default settings, and you can change them yourself (for example, by exporting JPDA_ADDRESS before starting).
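Before attaching the IDE, it can be handy to confirm that the JPDA port is actually listening. Below is a minimal, hypothetical check using only the JDK (not part of the original post), assuming the default port 8000 on the local machine:

    import java.io.IOException;
    import java.net.Socket;

    public class CheckDebugPort {
        public static void main(String[] args) {
            // Try to open a TCP connection to Tomcat's JPDA debug port.
            try (Socket socket = new Socket("localhost", 8000)) {
                System.out.println("JPDA port 8000 is listening");
            } catch (IOException e) {
                System.out.println("JPDA port 8000 is not reachable: " + e.getMessage());
            }
        }
    }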

After completing the configuration above, on the Eclipse side debug the sqoop-server project with Remote Java Application: fill in the remote JVM's address and port (8000 in this example) and click Debug.
