elasticsearch-6.1.1 Java development notes

1. Put the jars from the lib and modules subdirectories of the Elasticsearch distribution on your application's classpath. Alternatively, if you build with Maven, add the following dependency:

<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>transport</artifactId>
    <version>6.1.1</version>
</dependency>

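The transport client logs through Log4j 2, and the transport artifact only pulls in log4j-api, so when running the client standalone you usually also want a Log4j 2 implementation on the classpath. A minimal sketch (the version shown is an assumption and should match the Log4j version bundled with your Elasticsearch distribution):

<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-core</artifactId>
    <version>2.9.1</version>
</dependency>
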
2. A simple bulk-indexing test looks like this:


import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URL;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class ElasticSearchBulkIn {

 private static String host;
 private static int port;
 private static String clusterName;
 
 private static String index;
 private static String type;
 public static int thread = 20;
 public static Long records=10000000000L;
 public static String filename = "file";
 
public static void main(String[] args) {
    loadProperties();
    TransportClient client=null;
    long start = System.currentTimeMillis();
    try {

        Settings settings = Settings.builder().put("cluster.name", clusterName).put("client.transport.sniff", true).build();
        
        client = new PreBuiltTransportClient(settings)
        .addTransportAddress(new TransportAddress(InetAddress.getByName(host), port));

        bulkIndexBySize(client);                                
        
    } catch (Exception e) {
        e.printStackTrace();
    }finally{
        if(client!=null){
            client.close();
        }

        System.out.println("Bulk execution completed  :" +(System.currentTimeMillis() - start)/1000+" S");
    }
}

public static void bulkIndexBySize(Client client) {
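    // Note: the index name is hard-coded here (and in the mapping example in step 3 below),
    // so it overrides the "index" value read from the properties file.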
    String index = "index100e";

    // delete the index first if it already exists
    // if (client.admin().indices().prepareExists(index).execute().actionGet().isExists())
    //     client.admin().indices().prepareDelete(index).execute().actionGet();

    final BulkProcessor bp = BulkProcessor.builder(client, new BulkProcessor.Listener() {

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
             System.out.println("Bulk execution completed [" + executionId + "].\n" +
                        "Took (ms): " + response.getIngestTookInMillis() + "\n" +
                        "Failures: " + response.hasFailures() + "\n" +
                        "Count: " + response.getItems().length);
             
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
             System.out.println("Bulk execution failed [" + executionId + "].\n" +
                     failure.toString());
        }

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            // nothing to do before each bulk request
        }
    })
            .setConcurrentRequests(4)                           // default 1; number of bulk requests executed concurrently
            .setBulkActions(-1)                                 // default 1000; -1 disables flushing by document count
            .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) // default 5 MB
            .setFlushInterval(TimeValue.timeValueSeconds(500))  // time-based flush; not enabled by default
            .setBackoffPolicy(                                  // default is exponential backoff, 50 ms initial delay, 8 retries
                    BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3))
            .build();

    try {
        
        StringBuffer sbf =file2buffer(filename);
        for(int i=0;i<1000;i++){
                                        
            try {
                XContentBuilder source = XContentFactory.jsonBuilder().startObject()
                        .field("title1", getRandomString2(sbf,30))
                        .field("title2", getRandomString2(sbf,30))
                        .field("conten1", getRandomString2(sbf,255))
                        .field("conten2", getRandomString2(sbf,255))
                        .endObject();

                bp.add(Requests.indexRequest(index).type(type).source(source));
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        
        // index-level settings tuned for bulk loading; in 6.x "index.translog.interval" is no
        // longer a valid setting, and these would normally be applied before the bulk run
        Settings settings = Settings.builder()
                .put("index.refresh_interval", "5s")
                .put("index.translog.sync_interval", "10s")
                .put("index.translog.durability", "async")
                .build();

        client.admin().indices().prepareUpdateSettings(index).setSettings(settings).execute().actionGet();
    }catch(Exception e) {
        e.printStackTrace();
    }
    finally {
        try {
            // awaitClose() flushes pending requests and also waits for in-flight bulk
            // requests to finish, which matters because concurrent requests are enabled above
            bp.awaitClose(1, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}

private static void loadProperties() {
    File serverInfo = null;
    URL location = Thread.currentThread().getContextClassLoader().getResource("server_info_http.properties");
    if (location == null) {
        serverInfo = new File(System.getProperty("usr.dir"), "server_info.properties");
    } else {
        serverInfo = new File(location.getPath());
    }
    try (FileInputStream fis = new FileInputStream(serverInfo)) {
        Properties properties = new Properties();
        properties.load(fis);
        host = properties.getProperty("host", "localhost");
        port = Integer.parseInt(properties.getProperty("port", "9300"));
        clusterName = properties.getProperty("cluster_name", "esss130");
        index = properties.getProperty("index", "index1");
        type = properties.getProperty("type", "text");
        thread = Integer.parseInt(properties.getProperty("thread", "10"));
        filename = properties.getProperty("filename", "test.txt");
        records = Long.parseLong(properties.getProperty("records", "10000000"));
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }

}
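
    // -------------------------------------------------------------------------
    // The original post does not include file2buffer() and getRandomString2(),
    // so the two helpers below are assumed implementations added so the sample
    // compiles: file2buffer() reads the sample file into memory, and
    // getRandomString2() draws a random substring of the requested length.
    // -------------------------------------------------------------------------
    private static StringBuffer file2buffer(String filename) throws IOException {
        StringBuffer sbf = new StringBuffer();
        BufferedReader reader = new BufferedReader(new FileReader(filename));
        String line;
        while ((line = reader.readLine()) != null) {
            sbf.append(line);
        }
        reader.close();
        return sbf;
    }

    private static String getRandomString2(StringBuffer sbf, int length) {
        Random random = new Random();
        int start = random.nextInt(Math.max(sbf.length() - length, 1));
        return sbf.substring(start, Math.min(start + length, sbf.length()));
    }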

}
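
For reference, a server_info_http.properties file matching the keys read in loadProperties() might look like this (all values are illustrative; host, port and cluster_name must match your cluster, and port is the transport port, not the HTTP port):

host=127.0.0.1
port=9300
cluster_name=esss130
index=index100e
type=text
thread=10
filename=test.txt
records=10000000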

3. The index and the type have to be defined before indexing. The mapping for the type is shown below; do not forget the -H 'Content-Type: application/json' header. In 6.x the old "index": "not_analyzed" string syntax is no longer accepted, so un-analyzed string fields such as title1 and title2 are mapped as keyword:
curl -XPOST http://11.0.0.35:9200/index100e/text/_mapping -H 'Content-Type: application/json' -d '
{
  "text": {
    "_all": {
      "analyzer": "ik_max_word",
      "search_analyzer": "ik_max_word",
      "term_vector": "no",
      "store": false
    },
    "properties": {
      "content1": {
        "type": "text",
        "analyzer": "ik_max_word",
        "search_analyzer": "ik_max_word"
      },
      "content2": {
        "type": "text",
        "analyzer": "ik_max_word",
        "search_analyzer": "ik_max_word"
      },
      "title1": {
        "type": "keyword"
      },
      "title2": {
        "type": "keyword"
      }
    }
  }
}
'
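
Note that _mapping can only be applied to an index that already exists, so create index100e first. A minimal sketch, assuming the same host (the shard and replica counts are placeholders to adjust for your cluster):

curl -XPUT http://11.0.0.35:9200/index100e -H 'Content-Type: application/json' -d '
{
  "settings": {
    "number_of_shards": 5,
    "number_of_replicas": 1
  }
}
'

The mapping shown above can also be nested under a "mappings" key in this create request, so that the index and the type are defined in a single call.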
