Note: elasticdump requires Node.js v10.0.0 or later.
wget https://nodejs.org/dist/v14.17.3/node-v14.17.3-linux-x64.tar.xz -O /opt/node-v14.17.3-linux-x64.tar.xz
tar -xvf /opt/node-v14.17.3-linux-x64.tar.xz -C /opt
Configure the environment variables:
vim ~/.bashrc
# append the following
# node
export NODE_HOME=/opt/node-v14.17.3-linux-x64
export PATH=$NODE_HOME/bin:$PATH
# reload
source ~/.bashrc
Check that the versions are reported:
[root@localhost ~]# node -v
v14.17.3
[root@localhost ~]# npm -v
6.14.13
npm install elasticdump
A successful install ends with output like:
+ [email protected]
added 112 packages from 198 contributors and audited 112 packages in 19.171s
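Alternatively, a global install puts both executables straight onto PATH (assuming npm's global bin directory is already on PATH), which makes the .bashrc step below unnecessary:
npm install -g elasticdump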
With the local install, npm creates a node_modules directory in the current directory containing the elasticdump package. Its bin subdirectory holds two executables: elasticdump (single-index operations) and multielasticdump (multi-index operations). For convenience, add them to PATH:
vim ~/.bashrc
# append the following
# elasticdump
export DUMP_HOME=/root/node_modules/elasticdump
export PATH=$DUMP_HOME/bin:$PATH
# reload
source ~/.bashrc
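To confirm the tools now resolve on PATH (command -v is a standard shell builtin; --help prints the usage text shipped in lib/help.txt):
command -v elasticdump multielasticdump
elasticdump --help
The installed package has the following layout: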
.
├── bin
│   ├── elasticdump
│   └── multielasticdump
├── elasticdump.js
├── lib
│   ├── add-auth.js
│   ├── argv.js
│   ├── aws4signer.js
│   ├── help.txt
│   ├── ioHelper.js
│   ├── is-url.js
│   ├── jsonparser.js
│   ├── parse-base-url.js
│   ├── parse-meta-data.js
│   ├── processor.js
│   ├── splitters
│   ├── transports
│   └── version-check.js
├── LICENSE.txt
├── package.json
├── README.md
└── transforms
    └── anonymize.js
# Copy an index from production to staging, including analyzer, mapping, and data:
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=http://staging.es.com:9200/my_index \
--type=analyzer
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=http://staging.es.com:9200/my_index \
--type=mapping
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=http://staging.es.com:9200/my_index \
--type=data
# Back up an index's mapping and data to files:
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=/data/my_index_mapping.json \
--type=mapping
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=/data/my_index.json \
--type=data
# Back up an index to a gzip file via stdout:
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=$ \
| gzip > /data/my_index.json.gz
# Back up the results of a query to a file
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=query.json \
--searchBody="{\"query\":{\"term\":{\"username\": \"admin\"}}}"
# Specify searchBody from a file
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=query.json \
--searchBody=@/data/searchbody.json
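A minimal /data/searchbody.json for the command above could hold the same query shown inline earlier:
{"query":{"term":{"username": "admin"}}}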
# Copy a single shard's data:
elasticdump \
--input=http://es.com:9200/api \
--output=http://es.com:9200/api2 \
--input-params="{\"preference\":\"_shards:0\"}"
# Back up aliases to a file
elasticdump \
--input=http://es.com:9200/index-name/alias-filter \
--output=alias.json \
--type=alias
# Import aliases into ES
elasticdump \
--input=./alias.json \
--output=http://es.com:9200 \
--type=alias
# Back up templates to a file
elasticdump \
--input=http://es.com:9200/template-filter \
--output=templates.json \
--type=template
# Import templates into ES
elasticdump \
--input=./templates.json \
--output=http://es.com:9200 \
--type=template
# Split files into multiple parts
elasticdump \
--input=http://production.es.com:9200/my_index \
--output=/data/my_index.json \
--fileSize=10mb
# Import data from S3 into ES (using s3urls)
elasticdump \
--s3AccessKeyId "${access_key_id}" \
--s3SecretAccessKey "${access_key_secret}" \
--input "s3://${bucket_name}/${file_name}.json" \
--output=http://production.es.com:9200/my_index
# Export ES data to S3 (using s3urls)
elasticdump \
--s3AccessKeyId "${access_key_id}" \
--s3SecretAccessKey "${access_key_secret}" \
--input=http://production.es.com:9200/my_index \
--output "s3://${bucket_name}/${file_name}.json"
# Import data from MINIO (s3 compatible) into ES (using s3urls)
elasticdump \
--s3AccessKeyId "${access_key_id}" \
--s3SecretAccessKey "${access_key_secret}" \
--input "s3://${bucket_name}/${file_name}.json" \
--output=http://production.es.com:9200/my_index \
--s3ForcePathStyle true \
--s3Endpoint https://production.minio.co
# Export ES data to MINIO (s3 compatible) (using s3urls)
elasticdump \
--s3AccessKeyId "${access_key_id}" \
--s3SecretAccessKey "${access_key_secret}" \
--input=http://production.es.com:9200/my_index \
--output "s3://${bucket_name}/${file_name}.json"
--s3ForcePathStyle true
--s3Endpoint https://production.minio.co
# Import data from a CSV file into ES (using csvurls)
# The csv:// prefix must be included so the file is parsed as CSV,
# e.g. --input "csv://${file_path}.csv"
# --csvSkipRows skips parsed rows (this does not count the header row)
# --csvDelimiter defaults to ','
elasticdump \
--input "csv:///data/cars.csv" \
--output=http://production.es.com:9200/my_index \
--csvSkipRows 1 \
--csvDelimiter ";"
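For reference, a hypothetical /data/cars.csv matching the flags above: semicolon-delimited, a header row (which --csvSkipRows does not count), and a first data row that --csvSkipRows 1 discards:
make;model;year
discarded;discarded;0
Toyota;Corolla;2015
Honda;Civic;2018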
# Back up ES indices & all their types to the es_backup folder
multielasticdump \
--direction=dump \
--match='^.*$' \
--input=http://production.es.com:9200 \
--output=/tmp/es_backup
# Only back up ES indices whose names end with the suffix `-index` (match regex).
# Only the indices data will be backed up. All other types are ignored.
# NB: analyzer & alias types are ignored by default
multielasticdump \
--direction=dump \
--match='^.*-index$' \
--input=http://production.es.com:9200 \
--ignoreType='mapping,settings,template' \
--output=/tmp/es_backup
Common options:
--direction dump / load (export / import)
--ignoreType types to skip: data, mapping, analyzer, alias, settings, template
--includeType types to include: data, mapping, analyzer, alias, settings, template
--prefix prepend to the index name, e.g. es6-${index}
--suffix append to the index name, e.g. ${index}-backup-2018-03-13
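As an illustration, loading a dump while tagging every index with a date suffix might look like this (a sketch assuming, per the option description above, that --suffix is applied to index names on load):
multielasticdump \
--direction=load \
--match='^.*$' \
--input=/tmp/es_backup \
--output=http://staging.es.com:9200 \
--suffix='-backup-2018-03-13'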
Source ES address: http://192.168.1.140:9200
Source index name: source_index
Target ES address: http://192.168.1.141:9200
Target index name: target_index
Sync the two clusters directly:
elasticdump \
--input=http://192.168.1.140:9200/source_index \
--output=http://192.168.1.141:9200/target_index \
--type=mapping
elasticdump \
--input=http://192.168.1.140:9200/source_index \
--output=http://192.168.1.141:9200/target_index \
--type=data \
--limit=2000 # objects per batch, default 100; raise it to speed up large migrations
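If either cluster requires HTTP basic auth, elasticdump accepts credentials embedded in the URL (user and password below are placeholders):
elasticdump \
--input=http://user:password@192.168.1.140:9200/source_index \
--output=http://user:password@192.168.1.141:9200/target_index \
--type=data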
# Export
elasticdump \
--input=http://192.168.1.140:9200/source_index \
--output=/data/source_index_mapping.json \
--type=mapping
elasticdump \
--input=http://192.168.1.140:9200/source_index \
--output=/data/source_index.json \
--type=data \
--limit=2000
# Import
elasticdump \
--input=/data/source_index_mapping.json \
--output=http://192.168.1.141:9200/target_index \
--type=mapping
elasticdump \
--input=/data/source_index.json \
--output=http://192.168.1.141:9200/target_index \
--type=data \
--limit=2000
# Export
multielasticdump \
--direction=dump \
--match='^.*$' \
--input=http://192.168.1.140:9200 \
--output=/tmp/es_backup \
--includeType='data,mapping' \
--limit=2000
# Import
multielasticdump \
--direction=load \
--match='^.*$' \
--input=/tmp/es_backup \
--output=http://192.168.1.141:9200 \
--includeType='data,mapping' \
--limit=2000
Back up an ES index to a gzip file to save storage:
elasticdump \
--input=http://192.168.1.140:9200/source_index \
--output=$ \
--limit=2000 \
| gzip > /data/source_index.json.gz
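To restore such an archive, stream it back in; $ denotes stdin for --input just as it denotes stdout for --output above (the target URL is illustrative):
zcat /data/source_index.json.gz | elasticdump \
--input=$ \
--output=http://192.168.1.141:9200/target_index \
--limit=2000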
#!/bin/bash
# Interactively sync a single index (mapping, then data) between two clusters
echo -n "Source ES address: "
read source_es
echo -n "Target ES address: "
read target_es
echo -n "Source index name: "
read source_index
echo -n "Target index name: "
read target_index
DUMP_HOME=/root/node_modules/elasticdump/bin
${DUMP_HOME}/elasticdump --input=${source_es}/${source_index} --output=${target_es}/${target_index} --type=mapping
${DUMP_HOME}/elasticdump --input=${source_es}/${source_index} --output=${target_es}/${target_index} --type=data --limit=2000
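A sample session, using the addresses from this article (the script name is arbitrary):
$ bash sync_index.sh
Source ES address: http://192.168.1.140:9200
Target ES address: http://192.168.1.141:9200
Source index name: source_index
Target index name: target_index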
#!/bin/bash
# Back up one index (mapping and data) to JSON files, then zip the result
source_es=http://192.168.1.140:9200
target_index=tspa-template-question-answer
data_dir=/opt/es_backup
DUMP_HOME=/root/node_modules/elasticdump/bin
if [ ! -d "${data_dir}" ]; then
mkdir ${data_dir}
fi
${DUMP_HOME}/elasticdump --input=${source_es}/${target_index} --output=${data_dir}/${target_index}_mapping.json --type=mapping
${DUMP_HOME}/elasticdump --input=${source_es}/${target_index} --output=${data_dir}/${target_index}.json --type=data --limit=2000
# -j junks paths, -q quiet, -m deletes the .json files once archived
zip -jqrm ${data_dir}/$(date '+%Y%m%d-%H%M').zip ${data_dir}/*.json
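To run the backup on a schedule, a crontab entry like the following would fire nightly at 02:00 (the script path and log file are hypothetical):
0 2 * * * /bin/bash /opt/scripts/es_backup.sh >> /var/log/es_backup.log 2>&1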
#!/bin/bash
# Restore an index from previously exported mapping and data files
echo -n "Target ES address: "
read target_es
echo -n "Source index name: "
read source_index
echo -n "Mapping file name: "
read map_file
echo -n "Data file name: "
read data_file
DUMP_HOME=/root/node_modules/elasticdump/bin
${DUMP_HOME}/elasticdump --input=${map_file} --output=${target_es}/${source_index} --type=mapping
${DUMP_HOME}/elasticdump --input=${data_file} --output=${target_es}/${source_index} --type=data --limit=2000
#!/bin/bash
# Dump every index (data and mapping) from one cluster, then zip the result
source_es=http://192.168.1.140:9200
data_dir=/opt/es_backup
DUMP_HOME=/root/node_modules/elasticdump/bin
if [ ! -d "${data_dir}" ]; then
mkdir ${data_dir}
fi
${DUMP_HOME}/multielasticdump --direction=dump --match='^.*$' --input=${source_es} --output=${data_dir} --includeType='data,mapping' --limit=2000
zip -jqrm ${data_dir}/$(date '+%Y%m%d-%H%M').zip ${data_dir}/*.json
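A matching restore sketch: unzip an archive into a working directory, then load it with multielasticdump (the archive name and target address are placeholders):
unzip /opt/es_backup/20240101-0200.zip -d /tmp/es_restore
multielasticdump --direction=load --match='^.*$' --input=/tmp/es_restore --output=http://192.168.1.141:9200 --includeType='data,mapping' --limit=2000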