基于流式解析JSON内容

阅读更多

        传统的JSON解析方式需要先完整地构建好整个JSON文本内容,然后通过JSON框架去解析反序列化。但是当JSON内容非常长,比如一个JSON列表中存储有N个元组,数量多达上万个,这样在传输JSON文本内容的时候如何实现流式传输呢?方法当然是有的,在Solr的Xport代码中就有这样一段代码,类名是:JSONTupleStream。

        

        

import java.io.IOException;
import java.io.Reader;
import java.util.List;
import java.util.Map;

import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.noggit.JSONParser;
import org.noggit.ObjectBuilder;

/**
 * Incrementally streams tuples (documents) out of a Solr-style JSON response
 * without materializing the whole payload in memory.
 *
 * <p>{@link #next()} lazily positions the parser inside the {@code "docs"}
 * array on the first call, then returns one document {@code Map} per call and
 * {@code null} once the array is exhausted. Adapted from Solr's export (Xport)
 * {@code JSONTupleStream}.
 *
 * <p>Not thread-safe: a single consumer must drive {@link #next()}.
 */
public class JSONTupleStream {
  private List path;  // future... for more general stream handling
  private final Reader reader;
  private final JSONParser parser;
  private boolean atDocs;  // true once the parser has been positioned at the "docs" array

  public JSONTupleStream(Reader reader) {
    this.reader = reader;
    this.parser = new JSONParser(reader);
  }

  /**
   * Returns the next tuple from the {@code "docs"} array, or {@code null}
   * when the array is exhausted or was never found.
   *
   * @throws IOException on malformed input, or wrapping a server-reported error
   */
  public Map next() throws IOException {
    if (!atDocs) {
      boolean found = advanceToDocs();
      atDocs = true;
      if (!found) return null;
    }
    // Advance past ARRAY_START (when we have just advanced to docs) or past
    // the OBJECT_END left over from the previous call.
    int event = parser.nextEvent();
    if (event == JSONParser.ARRAY_END) return null;

    Object o = ObjectBuilder.getVal(parser);
    // right now, getVal will leave the last event read as OBJECT_END

    return (Map) o;
  }

  /** Closes the underlying reader. */
  public void close() throws IOException {
    reader.close();
  }

  /** Consumes the next parser event and fails if it is not the expected type. */
  private void expect(int parserEventType) throws IOException {
    int event = parser.nextEvent();
    if (event != parserEventType) {
      throw new IOException("JSONTupleStream: expected "
          + JSONParser.getEventString(parserEventType)
          + " but got " + JSONParser.getEventString(event));
    }
  }

  /**
   * Advances the parser until the given map key is seen at the current object
   * level (or, with {@code deepSearch}, anywhere below it).
   *
   * @param key        key to look for; {@code null} just skips the current object
   * @param deepSearch whether to descend into nested objects while searching
   * @return true if the key was found; false when the enclosing object ends first
   */
  private boolean advanceToMapKey(String key, boolean deepSearch) throws IOException {
    for (;;) {
      int event = parser.nextEvent();
      switch (event) {
        case JSONParser.STRING:
          if (key != null) {
            String val = parser.getString();
            if (key.equals(val)) {
              return true;
            } else if ("error".equals(val)) {
              // server reported a failure; surface it instead of searching on
              handleError();
            }
          }
          break;
        case JSONParser.OBJECT_END:
          return false;
        case JSONParser.OBJECT_START:
          if (deepSearch) {
            boolean found = advanceToMapKey(key, true);
            if (found) {
              return true;
            }
          } else {
            // skip the nested object without matching anything inside it
            advanceToMapKey(null, false);
          }
          break;
        case JSONParser.ARRAY_START:
          skipArray(key, deepSearch);
          break;
      }
    }
  }

  /**
   * Reads the "error" object and rethrows its "msg" field as an exception.
   * Always throws; never returns normally.
   */
  private void handleError() throws IOException {
    for (;;) {
      int event = parser.nextEvent();
      if (event == JSONParser.STRING) {
        String val = parser.getString();
        if ("msg".equals(val)) {
          event = parser.nextEvent();
          if (event == JSONParser.STRING) {
            String msg = parser.getString();
            if (msg != null) {
              throw new SolrStream.HandledException(msg);
            }
          }
        }
      } else if (event == JSONParser.OBJECT_END) {
        throw new IOException("JSONTupleStream: error object contained no msg field");
      }
    }
  }

  /** Skips the remainder of the current array, descending into nested structures. */
  private void skipArray(String key, boolean deepSearch) throws IOException {
    for (;;) {
      int event = parser.nextEvent();
      switch (event) {
        case JSONParser.OBJECT_START:
          advanceToMapKey(key, deepSearch);
          break;
        case JSONParser.ARRAY_START:
          skipArray(key, deepSearch);
          break;
        case JSONParser.ARRAY_END:
          return;
      }
    }
  }

  /**
   * Positions the parser just past the ARRAY_START of the "docs" array.
   *
   * @return true if a "docs" key was found
   */
  private boolean advanceToDocs() throws IOException {
    expect(JSONParser.OBJECT_START);
    advanceToMapKey("numFound", true);
    expect(JSONParser.LONG);  // consume the numFound value; its token is now behind us
    boolean found = advanceToMapKey("docs", true);
    expect(JSONParser.ARRAY_START);
    return found;
  }
}

 

再写一段测试脚本:

import java.io.StringReader;
import java.util.Map;

import com.dfire.tis.solrextend.core.JSONTupleStream;

import junit.framework.TestCase;

public class TestTupleStreamParser extends TestCase {

    /**
     * Feeds a Solr-style streaming response through {@link JSONTupleStream}
     * and verifies every tuple in the "docs" array is surfaced in order,
     * followed by a null terminator.
     */
    public void test() throws Exception {

        // Note the final "}}": the outer object must be closed — the original
        // sample JSON was unbalanced and only parsed because iteration stops
        // at the docs ARRAY_END.
        StringReader reader = new StringReader("{\"responseHeader\": {\"status\": 0}, \"response\":{\"numFound\":" + 100
                + ", \"docs\":[{name:'baisui'},{name:'xxxx'}]}}");
        JSONTupleStream tupleStream = new JSONTupleStream(reader);
        try {
            Map tuple = tupleStream.next();
            assertNotNull("expected first tuple", tuple);
            assertEquals("baisui", tuple.get("name"));

            tuple = tupleStream.next();
            assertNotNull("expected second tuple", tuple);
            assertEquals("xxxx", tuple.get("name"));

            // stream must signal exhaustion with null
            assertNull(tupleStream.next());
        } finally {
            // release the underlying reader even if an assertion fails
            tupleStream.close();
        }
    }
}

 这样就可以做到:发送端不停地发送,客户端不断地去消费。这样做的好处是,当服务端与客户端之间传输大量结构化数据的时候,可以避免先开辟一块很大的内存空间,从而有效防止内存溢出,同时也可以有效提高系统吞吐率。

 

你可能感兴趣的:(json,流式解析)