Sybase 大数据量(100W条)分页 jdbc实现目前要求无排序


首次分页大概为10秒左右。以后翻页基本上是瞬间完成。要求是无排序情况下。

分析:sybase不提供分页方法。top函数还不能放在子查询语句中。使用hibernate分页前几页和后几页没问题,如果数据量大翻到30000页时就内存溢出了。而且效率慢。

使用存储过程分页大数据量也不适合。效率低而且不支持并发。

由于 select top pageSize column1,column2,column3 from tableName where id > ?这种语句也相当快但如果数据列多会有大量流量产生。

select count(*) from table   这个语句比较慢100W条也得10秒

综上所述,分三个步骤

1,把所有数据总数取出来

      如果每次都从数据库取会浪费不必要的时间。所以第一次加载的时候使用缓存保存起来。

     SQL:select count(*) from table

2,找到需要数据的id

      如果数据量比较大100W条,如果查找第50W条速度也比较慢。所以第一次加载的时候需要缓存,缓存的内容为数据的id以及位置。

      例如:100W条数据缓存50个位置,每2W条一个 。那么从100W条数据中找某一个id就相当于从2W条中找

     SQL:select top pageSize+startNumber id from tableName where id > ?

3,把数据取出来

     SQL:select top pageSize column1,column2,column3 from tableName where id >=?

 

/**
 * JDBC pagination for large Sybase tables (1,000,000+ rows), added by wangmeng 2013-4-18.
 *
 * Works only on a single table with no subqueries and no joins, because the SQL
 * text is rewritten directly (count(*) projection, "select top N" prefix, and
 * appended where / order-by clauses).
 *
 * Three steps: (1) get/cache the total row count, (2) locate the id of the first
 * wanted row via a cached position index, (3) fetch pageSize rows from that id on.
 *
 * NOTE(review): this listing appears damaged by HTML extraction — several lines
 * below have lost text around '&lt;' characters, so parts of the id-lookup and
 * data-fetch logic are incomplete here. Compare with the referenced article
 * before reusing this code.
 *
 * @param sql      the SQL statement to page over
 * @param cls      the entity class rows are mapped to
 * @param id       name of the id column used for keyset positioning
 * @param startNum zero-based index of the first row to return
 * @param pageSize number of rows per page
 * @return a Page carrying the total row count and the fetched rows
 */
  public Page findPageBySql( final String sql, Class cls,final String id, int startNum,
      final int pageSize) {
    final Page page = new Page();
    try{
            String execsql = sql;
            String sql2 = sql.toLowerCase();
            long btime = System.currentTimeMillis();
            long etime ;
                  final Connection con = JDBCUtil.getConnection();
                  PreparedStatement stmt;
                  ResultSet rs ;
            String counthql=sql2;
            // Step 1: total row count, served from the per-SQL cache when possible.
            int cacount = CacheUtil.getTotalSize(sql);// read cached total row count
                  int total = 0;
            if(cacount == -1){// cache miss: derive a count(*) statement from the query
              // Strip any trailing order-by before wrapping in count(*).
              if(counthql.indexOf("order")>-1){
                counthql="select count(*) "+counthql.substring(counthql.indexOf("from"), counthql.lastIndexOf("order"));
              }else{
                counthql="select count(*) "+counthql.substring(counthql.indexOf("from"), counthql.length());
              }
  
              System.out.println(counthql);
  
              btime = System.currentTimeMillis();
                    stmt = con.prepareStatement(counthql);
                    rs = stmt.executeQuery();
                    rs.next();
                    page.setTotalCount(rs.getInt(1));
                    total = rs.getInt(1);
                    CacheUtil.setTotal(sql, total);
            }else{
              total = (Integer)cacount;
                    page.setTotalCount(total);
            }
                  if(total <=0){
                  	return page;
                  }
            etime = System.currentTimeMillis();
            System.out.println("countsql处理时间:"+(etime - btime));

            btime = System.currentTimeMillis();
                  if(total < 1000){// small result set: no index needed, fetch directly
              System.out.println(execsql);
                    stmt = con.prepareStatement(execsql);
                    rs = stmt.executeQuery();
                    int var = 0;
                    // NOTE(review): the next line is garbled — text around a '<' was
                    // lost in extraction; it appears to merge a row-skipping loop with
                    // reading the floor entry of the id-position cache. Confirm against
                    // the original article before relying on this listing.
                    while(var++ entry = CacheUtil.getFloorEntry(sql, startNum);
                  if(entry == null){
                    // NOTE(review): empty branch — the cache-miss handling that
                    // belongs here seems to have been lost as well.
                    
                  }
                  // Rebase startNum relative to the cached position and anchor the
                  // id scan at the cached id value (keyset-style pagination).
                  startNum -= entry.getKey();
                  if(idsql.contains("where")){
                    idsql +=" and "+id+" >= "+entry.getValue();
                  }else{
                    idsql +="where "+id+" >= "+entry.getValue();
                  }
                  idsql = "select top "+(startNum+pageSize)+idsql.substring(idsql.indexOf("select")+6);
                }else{// no position index cached yet: build it on a background thread
                  new Thread(){
                    @Override
                    public void run() {
                      try {
                        Connection c = JDBCUtil.getConnection();
                        // NOTE(review): 'cachidsql' is not declared anywhere in the
                        // visible code — presumably built in the garbled section above.
                        ResultSet rs = c.prepareStatement(cachidsql+ " order by "+id).executeQuery();
                        int i =0;
                        int cap = CacheUtil.getIndexSize(sql);
                        Map map = new HashMap();
                        // Sample every cap-th row: position -> id value.
                              while(rs.next() ){
                              	if(i % cap ==0){
                              		map.put(i, rs.getInt(1));
                              	}
                              	i++;
                        }
                              CacheUtil.initPageIndex(sql, map);
                      } catch (Exception e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                      }
                    }
                  }.start();
                }
                idsql += " order by "+id;
              }
              System.out.println(idsql);
              
                    stmt = con.prepareStatement(idsql);
                    rs = stmt.executeQuery();
                    int var = 0;
                    // NOTE(review): the next line is also garbled — it appears to have
                    // been a loop collecting the wanted ids into 'ids' (never declared
                    // in the visible code), followed by the no-ordering branch that
                    // anchors the page with a single "id >= ?" predicate.
                    while(var++=?方式
                sbsql.append(" where ").append(id).append(">=").append(ids.get(0));
                execsql = "select top "+pageSize+execsql.substring(execsql.toLowerCase().indexOf("select")+6);
              }else{// with ordering: match explicit ids ("id = ? or id = ?")
                sbsql.append(" where (");
                for (int j = 0; j < ids.size(); j++) {
                  if(sbsql.indexOf("("+id) != -1){
                    sbsql.append(" or ");
                  }
                  sbsql.append(id +" = ").append(ids.get(j));
                }
                sbsql.append(")");
              }
              // Replace (or append) the where-clause of the original statement.
              if(!execsql.toLowerCase().contains("where")){
                execsql += sbsql.toString();
              }else {
                execsql = execsql.substring(0, execsql.toLowerCase().indexOf("where"))+sbsql.toString();
              }
              System.out.println(execsql);
              QueryBySqlResultSet qbc = new QueryBySqlResultSet(execsql,cls,pageSize);// fetch rows via Hibernate
//				            stmt = con.prepareStatement(execsql);
//				            rs = stmt.executeQuery();
              List list  = (List)getHibernateTemplate().execute(qbc);
              etime = System.currentTimeMillis();
              System.out.println("查询数据时间:"+(etime - btime));
              page.setData(list);
                  }
            btime = System.currentTimeMillis();
            // Close the connection on a separate thread (fire-and-forget; errors
            // are only printed).
            new Thread(){
              public void run() {
                try {
                  JDBCUtil.closeConnection(con);
                } catch (SQLException e) {
                  // TODO Auto-generated catch block
                  e.printStackTrace();
                }
              };
            }.start();
            etime = System.currentTimeMillis();
            System.out.println("rs关闭时间:"+(etime - btime));
    }catch(Exception e){
      e.printStackTrace();
    }
    return page;
  }

CacheUtil缓存工具类

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;

import com.metarnet.eoms.common.base.model.SQLCacheInfo;
/**
 * 给数据库表记录数和Id增加缓存
 * @author wangmeng 
 *
 */
/**
 * Per-SQL cache of total row counts and id-position indexes used by the Sybase
 * JDBC pagination code.
 *
 * Backed by a size-bounded LinkedHashMap keyed on the SQL text; once more than
 * MAX_SIZE distinct statements are cached, the eldest entry is evicted. All
 * accessors are synchronized because the position index is populated from a
 * background thread while the request thread reads it.
 *
 * @author wangmeng
 */
public class CacheUtil {
  /** Maximum number of distinct SQL statements kept; the eldest is evicted beyond this. */
  private static final int MAX_SIZE = 30;

  /** SQL text -> cached count/index info; LinkedHashMap eviction keeps it bounded. */
  private static final LinkedHashMap<String, SQLCacheInfo> lmap =
      new LinkedHashMap<String, SQLCacheInfo>() {
        private static final long serialVersionUID = -3432076593791024110L;

        @Override
        protected boolean removeEldestEntry(Map.Entry<String, SQLCacheInfo> eldest) {
          return size() > MAX_SIZE;
        }
      };

  /** Static utility class; not instantiable. */
  private CacheUtil() {
  }

  /**
   * Caches the total row count for the given SQL.
   *
   * @param key   the SQL text
   * @param value total number of rows returned by count(*)
   */
  public static synchronized void setTotal(String key, int value) {
    SQLCacheInfo info = lmap.get(key);
    if (info == null) {
      info = new SQLCacheInfo();
    }
    info.setTotalSize(value);
    lmap.put(key, info);
  }

  /**
   * @param key the SQL text
   * @return the cached total row count, or -1 when nothing is cached for the SQL
   */
  public static synchronized int getTotalSize(String key) {
    SQLCacheInfo info = lmap.get(key);
    return info == null ? -1 : info.getTotalSize();
  }

  /**
   * Stores the id-position index (row position -> id value) for the SQL.
   *
   * @param key the SQL text
   * @param map row-position -> id-value samples
   */
  public static synchronized void initPageIndex(String key, Map<Integer, Integer> map) {
    SQLCacheInfo info = lmap.get(key);
    if (info == null) {
      info = new SQLCacheInfo();
      // Bug fix: the freshly created info was never stored back into lmap, so an
      // index built before the count was cached was silently discarded.
      lmap.put(key, info);
    }
    info.put(map);
  }

  /**
   * Returns the greatest cached position &lt;= index, with its id value.
   *
   * @param key   the SQL text
   * @param index 0-based row position
   * @return the floor entry, or null when nothing is cached for the SQL
   */
  public static synchronized Entry<Integer, Integer> getFloorEntry(String key, Integer index) {
    SQLCacheInfo info = lmap.get(key);
    return info == null ? null : info.getEntry(index);
  }

  /**
   * Returns the smallest cached position &gt;= index, with its id value.
   *
   * @param key   the SQL text
   * @param index 0-based row position
   * @return the ceiling entry, or null when nothing is cached for the SQL
   */
  public static synchronized Entry<Integer, Integer> getCeilEntry(String key, Integer index) {
    SQLCacheInfo info = lmap.get(key);
    return info == null ? null : info.getCeilEntry(index);
  }

  /**
   * @param key the SQL text
   * @return the row interval between cached id positions, or -1 when the SQL is
   *     not cached (or the table is too small to need an index)
   */
  public static synchronized int getIndexSize(String key) {
    SQLCacheInfo info = lmap.get(key);
    return info == null ? -1 : info.getIndexSize();
  }

  /**
   * @param key the SQL text
   * @return whether an id-position index has been built for the SQL
   * @throws RuntimeException when the SQL has no cache entry at all
   */
  public static synchronized boolean isInitIndex(String key) {
    SQLCacheInfo info = lmap.get(key);
    if (info == null) {
      throw new RuntimeException(key + "没有找到");
    }
    return info.isInitIndex();
  }
}

缓存信息SQLCacheInfo

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.Map.Entry;

/**
 * Cached pagination metadata for one SQL statement: the total row count plus a
 * sparse index mapping row positions to id values, used to turn deep paging
 * into a short "id >= ?" keyset scan.
 */
public class SQLCacheInfo {
  /** Number of index buckets: the table is sampled at roughly totalSize / MOD positions. */
  public static final int MOD = 50;
  /** Minimum row interval between two samples; also the "too small to index" cutoff. */
  public static final int CAPTION = 10000;

  /** Row position (0-based) -> id value at that position, sorted for floor/ceiling lookups. */
  private final TreeMap<Integer, Integer> indexId = new TreeMap<Integer, Integer>();

  /** Total number of rows reported by count(*). */
  private int totalSize;

  public int getTotalSize() {
    return totalSize;
  }

  public void setTotalSize(int totalSize) {
    this.totalSize = totalSize;
  }

  /**
   * Returns the sampled position closest below (or at) the requested row.
   *
   * @param index 0-based row position
   * @return the floor entry (position -> id), or null when no sample is &lt;= index
   */
  public Entry<Integer, Integer> getEntry(int index) {
    return indexId.floorEntry(index);
  }

  /**
   * Returns the sampled position closest above (or at) the requested row.
   *
   * @param index 0-based row position
   * @return the ceiling entry (position -> id), or null when no sample is &gt;= index
   */
  public Entry<Integer, Integer> getCeilEntry(int index) {
    return indexId.ceilingEntry(index);
  }

  /** Merges the given position -> id samples into the index. */
  public void put(Map<Integer, Integer> map) {
    indexId.putAll(map);
  }

  /**
   * Row interval between two sampled positions.
   *
   * @return the interval (never below CAPTION rows), or -1 when the table has
   *     fewer than CAPTION rows and needs no index
   */
  public int getIndexSize() {
    if (totalSize < CAPTION) {
      // Small tables are paged directly; no sampling needed.
      return -1;
    }
    // Was hard-coded as 10000 / 50; use the named constants they duplicated.
    return Math.max(CAPTION, totalSize / MOD);
  }

  /** @return whether any position samples have been stored yet */
  public boolean isInitIndex() {
    return !indexId.isEmpty();
  }

  /** Tiny manual smoke test for floorEntry lookups (prints 80). */
  public static void main(String[] args) {
    SQLCacheInfo info = new SQLCacheInfo();
    Map<Integer, Integer> map = new HashMap<Integer, Integer>();
    map.put(10, 2);
    map.put(40, 2);
    map.put(60, 2);
    map.put(80, 2);
    info.put(map);
    System.out.println(info.getEntry(100).getKey());
  }
}




Reference:

http://www.tuicool.com/articles/ryIjIn

你可能感兴趣的:(Sybase)