CREATE USER 'ranger'@'%' IDENTIFIED BY 'ranger';
GRANT ALL PRIVILEGES ON ranger.* TO 'ranger'@'%';
FLUSH PRIVILEGES;
解压ranger-2.0.0-admin.tar.gz
安装内置solr
编辑solr安装配置文件: vim $RANGER_ADMIN_HOME/contrib/solr_for_audit_setup/install.properties
JAVA_HOME=/opt/jdk1.8.0_201
SOLR_USER=solr
SOLR_GROUP=solr
SOLR_INSTALL=true #设置为true标示自动安装
SOLR_DOWNLOAD_URL=http://mirror.bit.edu.cn/apache/lucene/solr/8.5.1/solr-8.5.1.tgz # 修改solr安装包下载地址
SOLR_INSTALL_FOLDER=/opt/solr # 安装目录
SOLR_RANGER_HOME=/opt/solr/ranger_audit_server
SOLR_RANGER_PORT=6083
SOLR_DEPLOYMENT=standalone
SOLR_RANGER_DATA_FOLDER=/opt/solr/ranger_audit_server/data
执行$RANGER_ADMIN_HOME/contrib/solr_for_audit_setup/setup.sh
进行solr安装。
切换到solr用户启动solr /opt/solr/ranger_audit_server/scripts/start_solr.sh
(停止solr命令为相同目录下的stop_solr.sh)
访问http://hostname:6083
,如果能正常访问页面,表示solr安装成功。
ranger-admin安装。
# DB
DB_FLAVOR=MYSQL
SQL_CONNECTOR_JAR=${your path}/mysql-connector-java-8.0.16.jar
db_root_user=root
db_root_password=root
db_host=${your mysql server host}:3306
#
# DB UserId used for the Ranger schema
#
db_name=ranger
db_user=ranger
db_password=ranger
audit_store=solr
audit_solr_urls=http://${your solr server host}:6083/solr/ranger_audits
$RANGER_ADMIN_HOME/setup.sh
ranger-admin start
http://hostname:6080/
,如果能正常访问,表示ranger安装成功。编译ranger-kafka 插件(略)
解压kafka 插件 ranger-2.0.0-kafka-plugin.tar
配置install.properties,修改或添加如下项,其他保持默认 vim $ranger-2.0.0-kafka-plugin-HOME/install.properties
COMPONENT_INSTALL_DIR_NAME=$KAFKA_HOME
POLICY_MGR_URL=http://${your ranger server hostname}:6080/ # ranger
SQL_CONNECTOR_JAR=${your path}/mysql-connector-java-8.0.16.jar
XAAUDIT.SOLR.ENABLE=true
XAAUDIT.SOLR.URL=http://${your solr server hostname}:6083/solr/ranger_audits
JAVA_HOME=/opt/jdk1.8.0_201
REPOSITORY_NAME=kafkadev
XAAUDIT.SUMMARY.ENABLE=true
XAAUDIT.SOLR.SOLR_URL=http://${your solr server hostname}:6083/solr/ranger_audits
CUSTOM_USER=${username}
CUSTOM_GROUP=${username}
拷贝配置好的ranger-2.0.0-kafka-plugin到其他kafka broker节点。
在各个kafka broker节点执行ranger-kafka 插件中的脚本${ranger-2.0.0-kafka-plugin_HOME}/enable-kafka-plugin.sh
,插件会自动拷贝(创建软连接)插件目录下lib下的包到kafka的libs目录下,并在kafka的config目录下创建对应的以ranger开头的配置文件。
查看配置文件是否已经创建(ranger开头的文件)。
sudo yum install -y krb5-server
yum install -y krb5-workstation
vim /var/kerberos/krb5kdc/kdc.conf
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88
[realms]
BUGBOY.COM = { # 修改为自己的域名
#master_key_type = aes256-cts
acl_file = /var/kerberos/krb5kdc/kadm5.acl
dict_file = /usr/share/dict/words
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
}
/var/kerberos/krb5kdc/kadm5.acl
文件 */admin@BUGBOY.COM *
/etc/krb5.conf
,并拷贝到其他客户端/etc 目录下 # Configuration snippets may be placed in this directory as well
includedir /etc/krb5.conf.d/
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
pkinit_anchors = /etc/pki/tls/certs/ca-bundle.crt
# 修改域名为自己设置的域名
default_realm = BUGBOY.COM
default_ccache_name = KEYRING:persistent:%{uid}
[realms]
BUGBOY.COM = { # 修改域名为自己设置的域名
kdc = kdchost.bugboy.com #kdchost为kdc 服务器所在的主机名
admin_server = kdchost.bugboy.com #kdchost为kadmin 所在的主机名
}
# 修改域名为自己设置的域名
[domain_realm]
.bugboy.com = BUGBOY.COM
bugboy.com = BUGBOY.COM
# ip 地址为kdc服务器的ip地址
192.168.254.66 kdchost.bugboy.com kdchost
sudo systemctl enable krb5kdc.service
或者sudo chkconfig --level 35 krb5kdc on
sudo systemctl enable kadmin.service
或者 sudo chkconfig --level 35 kadmin on
kdb5_util create -s -r BUGBOY.COM
sudo systemctl start krb5kdc.service
或者 sudo service krb5kdc start
sudo systemctl start kadmin.service
或者 sudo service kadmin start
sudo kadmin.local
addprinc -randkey root/admin
addprinc -randkey kafka/[email protected]
addprinc -randkey kafka/[email protected]
addprinc -randkey kafka/[email protected]
xst -k kafka.keytab kafka/[email protected]
xst -k kafka.keytab kafka/[email protected]
xst -k kafka.keytab kafka/[email protected]
sudo kinit -kt kafka.keytab路径 kafka/[email protected]
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="${KAFKA_HOME}/keytabs/kafka.keytab" # KAFKA_HOME需要修改为KAFKA安装目录的路径
principal="kafka/[email protected]";
};
# JVM performance options
的地方即KAFKA_JVM_PERFORMANCE_OPTS追加-Djava.security.krb5.conf=/etc/krb5.conf $KAFKA_JAAS,并拷贝到其他broker对应目录下。结果如下: # JVM performance options
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true -Djava.security.krb5.conf=/etc/krb5.conf $KAFKA_JAAS"
fi
EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
# 增加kafka server对应的KAFKA_JAAS变量,其中$KAFKA_HOME需要保证为有效路径。
export KAFKA_JAAS="-Djava.security.auth.login.config=$KAFKA_HOME/jaasconf/kafka_server_jaas.conf"
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
authorizer.class.name=org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer
advertised.listeners=SASL_PLAINTEXT://broker1:9092
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=GSSAPI
sasl.enabled.mechanisms=GSSAPI
sasl.kerberos.service.name=kafka
listeners=SASL_PLAINTEXT://broker1:9092
addprinc -randkey client1/[email protected]
addprinc -randkey client2/[email protected]
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="${KAFKA_HOME}/keytabs/client1.keytab" #KAFKA_HOME需要修改为绝对路径
principal="client1/[email protected]";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="${KAFKA_HOME}/keytabs/client2.keytab" #KAFKA_HOME需要修改为绝对路径
principal="client2/[email protected]";
};
security.protocol=SASL_PLAINTEXT
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
export KAFKA_JAAS="-Djava.security.auth.login.config=${KAFKA_HOME}/jaasconf/kafka_server_jaas.conf"
$KAFKA_HOME/bin/kafka-topics.sh --create --bootstrap-server broker1:9092 --topic test-topic --partitions 3 --replication-factor 2 --command-config $KAFKA_HOME/userConf/client.conf
export KAFKA_JAAS="-Djava.security.auth.login.config=${KAFKA_HOME}/jaasconf/kafka_client1_jaas.conf"
$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list broker1:9092 --topic test-topic --producer.config $KAFKA_HOME/config/userConf/client.conf
export KAFKA_JAAS="-Djava.security.auth.login.config=${KAFKA_HOME}/jaasconf/kafka_client2_jaas.conf"
$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list broker1:9092 --topic test-topic --producer.config $KAFKA_HOME/config/userConf/client.conf
export KAFKA_JAAS="-Djava.security.auth.login.config=${KAFKA_HOME}/jaasconf/kafka_client2_jaas.conf"
$KAFKA_HOME/bin/kafka-console-producer.sh --broker-list broker1:9092 --topic test-topic --producer.config $KAFKA_HOME/config/userConf/client.conf