单节点Kafka部署并开启Kerberos配置
安装Kerberos
server节点安装kerberos相关软件
yum install -y krb5-server krb5-workstation krb5-libs
修改配置文件krb5.conf
(#以下需要在服务端和客户端都配置,可以在服务端配置好以后使用scp拷贝。)
vim /etc/krb5.conf
# Configuration snippets may be placed in this directory as well
includedir /etc/krb5.conf.d/
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
pkinit_anchors = FILE:/etc/pki/tls/certs/ca-bundle.crt
default_realm = HADOOP.COM
[realms]
HADOOP.COM = {
kdc = hadoop01
admin_server = hadoop01
}
[domain_realm]
修改server服务端的配置文件kdc.conf
vim /var/kerberos/krb5kdc/kdc.conf
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88
[realms]
HADOOP.COM = {
#master_key_type = aes256-cts
acl_file = /var/kerberos/krb5kdc/kadm5.acl
dict_file = /usr/share/dict/words
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
}
配置阶段
创建kerberos数据库
[root@hdp01 ~]# kdb5_util create -s -r HADOOP.COM
Loading random data
Initializing database '/var/kerberos/krb5kdc/principal' for realm 'HADOOP.COM',
master key name 'K/M@HADOOP.COM'
You will be prompted for the database Master Password.
It is important that you NOT FORGET this password.
Enter KDC database master key:
Re-enter KDC database master key to verify:
(123456)
[root@hdp01 ~]#
创建管理员admin
[root@hdp01 ~]# kadmin.local -q "addprinc admin/admin"
Authenticating as principal root/admin@HADOOP.COM with password.
WARNING: no policy specified for admin/admin@HADOOP.COM; defaulting to no policy
Enter password for principal "admin/admin@HADOOP.COM":
Re-enter password for principal "admin/admin@HADOOP.COM":
Principal "admin/admin@HADOOP.COM" created.
(123456)
[root@hdp01 ~]#
给管理员账户添加acl权限
[root@hdp01 ~]# cat /var/kerberos/krb5kdc/kadm5.acl
*/admin@HADOOP.COM *
启动服务和设置开机自启
systemctl start krb5kdc
systemctl start kadmin
systemctl enable krb5kdc
systemctl enable kadmin
在客户端测试连接
[root@hdp01 ~]# kadmin -p admin/admin
Authenticating as principal admin/admin with password.
Password for admin/admin@HADOOP.COM:
kadmin: listprincs
admin/admin@HADOOP.COM
kadmin/admin@HADOOP.COM
kadmin/changepw@HADOOP.COM
kadmin/hdp01@HADOOP.COM
kiprop/hdp01@HADOOP.COM
krbtgt/HADOOP.COM@HADOOP.COM
首先下载kafka
wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.4.1/kafka_2.12-3.4.1.tgz --no-check-certificate
解压和重命名:
tar -xvf kafka_2.12-3.4.1.tgz
mv kafka_2.12-3.4.1 kafka
修改kafka配置
vim kafka/config/server.properties
broker.id=0
listeners=PLAINTEXT://192.168.117.135:9092
advertised.listeners=PLAINTEXT://192.168.117.135:9092
log.dirs=/opt/kafka/kafka-logs
zookeeper.connect=192.168.117.135:2181
启动kafka
./kafka-server-start.sh ../config/server.properties &
命令:
查看当前服务器中的所有topic
/opt/kafka/bin/kafka-topics.sh --bootstrap-server 172.16.121.194:9092 --list
创建test topic
/opt/kafka/bin/kafka-topics.sh --bootstrap-server 172.16.121.194:9092 --create --partitions 1 --replication-factor 1 --topic test
选项说明:
--topic 定义topic名
--replication-factor 定义副本数
--partitions 定义分区数
生产者命令行操作
/opt/kafka/bin/kafka-console-producer.sh --bootstrap-server 172.16.121.194:9092 --topic test
消费test主题中的数据。
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 172.16.121.194:9092 --from-beginning --topic test
kafka开启kerberos
首先创建用户和生成用户keytab
kadmin: addprinc -randkey kafka/hadoop01@HADOOP.COM
kadmin: addprinc -randkey client/hadoop01@HADOOP.COM
kadmin.local -q "xst -k /tmp/kafka.keytab -norandkey kafka/hadoop01@HADOOP.COM"
kadmin.local -q "xst -k /tmp/client_kafka.keytab -norandkey client/hadoop01@HADOOP.COM"
Ps:
执行addprinc命令,如果没指定-randkey或-nokey参数,需要设置密码
执行xst导出命令,如果没有使用-norandkey,会导致密码被随机重置
配置
修改kafka/config/server.properties
#将监听协议由PLAINTEXT改为SASL_PLAINTEXT,否则broker仍按明文接受连接,Kerberos不会生效
listeners=SASL_PLAINTEXT://192.168.117.135:9092
advertised.listeners=SASL_PLAINTEXT://192.168.117.135:9092
#增加
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=GSSAPI
sasl.enabled.mechanisms=GSSAPI
sasl.kerberos.service.name=kafka
super.users=User:kafka
先将导出的keytab拷贝到kafka配置目录:cp /tmp/kafka.keytab /opt/kafka/config/,再新建kafka_server_jaas.conf 文件
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/opt/kafka/config/kafka.keytab"
storeKey=true
useTicketCache=false
principal="kafka/hadoop01@HADOOP.COM";
};
修改/opt/kafka/bin/kafka-server-start.sh
export KAFKA_OPTS="-Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf"
Kafka 客户端配置
新建 kafka/config/client.properties 文件
security.protocol=SASL_PLAINTEXT
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
先将导出的keytab拷贝到kafka配置目录:cp /tmp/client_kafka.keytab /opt/kafka/config/,再新建 kafka/config/kafka_client_jaas.conf 文件
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/opt/kafka/config/client_kafka.keytab"
storeKey=true
useTicketCache=false
principal="client/hadoop01@HADOOP.COM";
};
修改 kafka-topics.sh、kafka-console-consumer.sh、kafka-console-producer.sh 脚本
倒数第二行增加
export KAFKA_OPTS="-Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/opt/kafka/config/kafka_client_jaas.conf"
启动并测试
/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
查看所有Topic
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server 192.168.117.135:9092 --command-config /opt/kafka/config/client.properties
创建一个测试的Topic
/opt/kafka/bin/kafka-topics.sh --create --topic topicsasl --bootstrap-server 192.168.117.135:9092 --partitions 1 --replication-factor 1 --command-config /opt/kafka/config/client.properties
生产消息
/opt/kafka/bin/kafka-console-producer.sh --topic topicsasl --bootstrap-server 192.168.117.135:9092 --producer.config /opt/kafka/config/client.properties
消费消息
/opt/kafka/bin/kafka-console-consumer.sh --topic topicsasl --from-beginning --bootstrap-server 192.168.117.135:9092 --consumer.config /opt/kafka/config/client.properties