分布式job

1、引入jar


<dependency>
    <groupId>com.dangdang</groupId>
    <artifactId>elastic-job-lite-core</artifactId>
    <version>2.1.5</version>
</dependency>

2、任务逻辑

public class ArchivieJob implements SimpleJob {
    /**
     * One job tick: archives a single not-yet-archived resume row.
     * The elastic-job cron trigger fires this repeatedly, so progress is
     * one row per tick until no un-archived rows remain.
     *
     * @param shardingContext sharding info supplied by elastic-job (unused here:
     *                        the job is configured with a single shard)
     */
    @Override
    public void execute(ShardingContext shardingContext) {
        // 1. Fetch one record that is still marked as not archived.
        String selectSql = "select * from resume where state = '未归档' limit 1";
        // FIX: restored the generic type (List<Map<String, Object>>) that was
        // stripped from the original listing.
        List<Map<String, Object>> list = JDBCUtil.executeQuery(selectSql);
        // FIX: the original tested `list == null && list.size() == 0`, which can
        // never be true for an empty list and NPEs when list is null.
        if (list == null || list.isEmpty()) {
            System.out.println("数据已经处理完毕!!!");
            return;
        }
        // 2. Flip the row's state, then copy it into the backup table.
        Map<String, Object> row = list.get(0);
        long id = (long) row.get("id");
        String name = (String) row.get("name");
        String education = (String) row.get("education");

        System.out.println("==========>" + id + " name:" + name + " education:" + education);

        String updateSql = "update resume set state = '已归档' where id = ?";
        JDBCUtil.executeUpdate(updateSql, id);

        // The copy runs after the update, so the backup row carries state '已归档'.
        // NOTE(review): the two statements are not wrapped in one transaction — a
        // crash between them leaves a row archived but not backed up; consider a
        // single transaction if that matters for this data.
        String insertSql = "insert into resume_bak select * from resume where id = ?";
        JDBCUtil.executeUpdate(insertSql, id);
    }
}
/**
 * Minimal JDBC helper: one connection per call, no pooling.
 * WARNING: credentials and host are hard-coded; move them to external
 * configuration before any real deployment.
 */
public class JDBCUtil {

    private static String driver = "com.mysql.jdbc.Driver";

    private static String url = "jdbc:mysql://101.132.167.18:3306/mydatabase";

    private static String username = "root";

    private static String password = "root";

    static {
        try {
            // Register the MySQL driver once when the class is loaded.
            Class.forName(driver);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }
    }

    /**
     * Runs a SELECT and maps every row to a column-name -> value map.
     *
     * @param sql query text with '?' placeholders
     * @param arg positional values bound to the placeholders, in order
     * @return one map per row (empty list when no rows or on SQL error)
     */
    public static List<Map<String, Object>> executeQuery(String sql, Object... arg) {
        // Declared outside the try so it can be returned after cleanup.
        List<Map<String, Object>> list = new ArrayList<>();
        Connection con = getConnection();
        ResultSet rs = null;
        PreparedStatement ps = null;
        try {
            ps = con.prepareStatement(sql);
            for (int i = 0; i < arg.length; i++) {
                ps.setObject(i + 1, arg[i]);
            }
            rs = ps.executeQuery();
            // Hoist metadata lookups out of the row loop.
            ResultSetMetaData meta = rs.getMetaData();
            int count = meta.getColumnCount();
            while (rs.next()) {
                // One map per row: column name is the key, cell value the value.
                Map<String, Object> map = new HashMap<>();
                for (int i = 0; i < count; i++) {
                    map.put(meta.getColumnName(i + 1), rs.getObject(i + 1));
                }
                list.add(map);
            }
        } catch (SQLException throwables) {
            throwables.printStackTrace();
        } finally {
            close(rs, ps, con);
        }
        // FIX: the original returned null here, silently discarding every row
        // it had just collected.
        return list;
    }

    /**
     * Runs an INSERT/UPDATE/DELETE with positional parameters.
     * SQL errors are logged and swallowed (best-effort semantics).
     */
    public static void executeUpdate(String sql, Object... arg) {
        Connection con = getConnection();
        PreparedStatement ps = null;
        try {
            ps = con.prepareStatement(sql);
            for (int i = 0; i < arg.length; i++) {
                ps.setObject(i + 1, arg[i]);
            }
            ps.executeUpdate();
        } catch (SQLException throwables) {
            throwables.printStackTrace();
        } finally {
            close(null, ps, con);
        }
    }

    /**
     * Opens a fresh connection.
     *
     * @return a new connection, or null when the connect attempt fails
     */
    public static Connection getConnection() {
        try {
            return DriverManager.getConnection(url, username, password);
        } catch (SQLException throwables) {
            throwables.printStackTrace();
        }
        return null;
    }

    /**
     * Closes the three JDBC resources, each independently and null-safely.
     * FIX: the original only closed ps/con when rs was non-null, so every
     * executeUpdate call leaked its statement AND its connection.
     */
    public static void close(ResultSet rs, PreparedStatement ps, Connection con) {
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException throwables) {
                throwables.printStackTrace();
            }
        }
        if (ps != null) {
            try {
                ps.close();
            } catch (SQLException throwables) {
                throwables.printStackTrace();
            }
        }
        if (con != null) {
            try {
                con.close();
            } catch (SQLException throwables) {
                throwables.printStackTrace();
            }
        }
    }
}

建表,初始化数据

create table resume(id bigint,name varchar(10),education varchar(10),state varchar(10));
insert into resume values(1,'张三','博士','未归档');
insert into resume values(2,'张二','博士','未归档');
insert into resume values(3,'李四','硕士','未归档');
insert into resume values(4,'李六','硕士','未归档');
insert into resume values(5,'王六','硕士','未归档');
insert into resume values(6,'王无','硕士','未归档');
insert into resume values(7,'赵六','本科','未归档');
insert into resume values(8,'赵八','本科','未归档');
insert into resume values(9,'小刚','本科','未归档');
insert into resume values(10,'小亮','本科','未归档');
insert into resume values(11,'小明','本科','未归档');
insert into resume values(12,'小花','专科','未归档');
insert into resume values(13,'龙龙','专科','未归档');
insert into resume values(14,'蓓蓓','博科','未归档');
insert into resume values(15,'阿珍','博科','未归档');
insert into resume values(16,'阿强','博士、','未归档');
insert into resume values(17,'杰克','博士、','未归档');
insert into resume values(18,'汤姆','博士、','未归档');
insert into resume values(19,'山姆','博士、','未归档');
insert into resume values(20,'山羊','博士、','未归档');

 3、执行器

public class ElasticJobMain {
    /**
     * Entry point: registers the archive job with ZooKeeper and starts the
     * elastic-job scheduler.
     */
    public static void main(String[] args) {
        // Coordination service (ZooKeeper) used by elastic-job for leader
        // election and shard assignment across job instances.
        ZookeeperConfiguration zkConfig =
                new ZookeeperConfiguration("101.132.167.18:2181", "data-archive-job");
        CoordinatorRegistryCenter registryCenter = new ZookeeperRegistryCenter(zkConfig);
        registryCenter.init();

        // Job definition: fire every 2 seconds, single shard, SimpleJob impl.
        JobCoreConfiguration coreConfig =
                JobCoreConfiguration.newBuilder("archive-job", "*/2 * * * * ?", 1).build();
        SimpleJobConfiguration simpleConfig =
                new SimpleJobConfiguration(coreConfig, ArchivieJob.class.getName());
        // overwrite(true): push this local config to ZooKeeper even if a
        // previous version is already registered there.
        LiteJobConfiguration liteConfig =
                LiteJobConfiguration.newBuilder(simpleConfig).overwrite(true).build();

        // Hand everything to the scheduler and start it.
        new JobScheduler(registryCenter, liteConfig).init();
    }
}

备注:可以使用 zooinspector 工具查看 zookeeper ,解压 zooinspector 安装包,执行 

java -jar zookeeper-dev-ZooInspector.jar

你可能感兴趣的:(cluster,分布式,java,数据库)