The goal: build a highly available SFTP service, with rsync shuttling data between the two nodes so that synchronization stays orderly.
First, create the user:
# The username (sftpuser below) can be changed as needed
[root@localhost ~]# groupadd sftp
[root@localhost ~]# useradd -g sftp -s /sbin/nologin -M sftpuser
[root@localhost ~]# passwd sftpuser
Changing password for user sftpuser.
New password:
BAD PASSWORD: The password is shorter than 8 characters
Retype new password:
passwd: all authentication tokens updated successfully.
[root@localhost ~]# mkdir -p /data/sftp/sftpuser
[root@localhost ~]# usermod -d /data/sftp/ sftpuser
[root@localhost ~]# chown root:sftp /data/sftp/
[root@localhost ~]# chown sftpuser:sftp /data/sftp/sftpuser/
[root@localhost ~]# vim /etc/ssh/sshd_config
#Subsystem sftp /usr/libexec/openssh/sftp-server
Subsystem sftp internal-sftp
Match Group sftp
    ChrootDirectory /data/sftp
    ForceCommand internal-sftp
    AllowTcpForwarding no
    X11Forwarding no
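Before restarting sshd it is worth validating the config and the chroot permissions, since OpenSSH refuses chrooted logins unless every path component of ChrootDirectory is owned by root and not writable by group or other. A minimal sanity check, assuming the layout set up above:

/usr/sbin/sshd -t                      # exits non-zero and prints the error if sshd_config is invalid
ls -ld /data/sftp /data/sftp/sftpuser  # expect root:sftp on /data/sftp, sftpuser:sftp on the subdirectory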
[root@localhost ~]# systemctl restart sshd
Set up a second machine in the same way, creating the user and configuring the service as above. If necessary, you may also need to ...
Test the login from a client:

C:\Users\cheng>sftp sftpuser@192.168.5.5
sftpuser@192.168.5.5's password:
Connected to sftpuser@192.168.5.5.
sftp> ls
test
sftp>
Install keepalived on both nodes:

yum install -y keepalived

On one node, node1:
[root@hadoop05 /]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     xxx@xxx.com
   }
   notification_email_from xxx@xxx.com
   smtp_server smtp.xxx.com
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   # vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 53
    priority 100
    nopreempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.5.16
    }
}
And on the other node, node2. The two configs are identical: with state BACKUP plus nopreempt on both, whichever node starts first takes the VIP and keeps it until it fails, so a recovered node does not preempt.
[root@hadoop03 /]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     xxx@xxx.com
   }
   notification_email_from xxx@xxx.com
   smtp_server smtp.xxx.com
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   # vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 53
    priority 100
    nopreempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.5.16
    }
}
systemctl start keepalived
systemctl enable keepalived
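To see which node currently holds the VIP, the same test the sync script below relies on:

# A non-empty result means this node is currently serving the virtual IP
ip addr | grep 192.168.5.16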
Both servers need the rsync daemon configured:
[root@hadoop03 /]# cat /etc/rsyncd.conf
# /etc/rsyncd: configuration file for rsync daemon mode
# See rsyncd.conf man page for more options.

# configuration example:
# uid = nobody
# gid = nobody
# use chroot = yes
# max connections = 4

pid file = /var/run/rsyncd.pid

# exclude = lost+found/
# transfer logging = yes
# timeout = 900
# ignore nonreadable = yes
# dont compress = *.gz *.tgz *.zip *.z *.Z *.rpm *.deb *.bz2

# [ftp]
#     path = /home/ftp
#     comment = ftp export area

[webgis]
path = /data/sftp/sftpuser
uid = sftpuser
gid = sftp
read only = false
Write the user's name and password to the designated location. One caveat: since the [webgis] module above defines no auth users or secrets file, the daemon actually accepts connections unauthenticated and the client-side --password-file is effectively ignored. To enforce authentication, add auth users = sftpuser and secrets file = /etc/rsyncd.secrets (holding user:password) to the module, and put only the bare password in the client-side file.
[root@hadoop03 ~]# cat sftppassword/sftppassword
sftpuser:123456
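Also lock down the file's permissions; rsync aborts with an error if a password file is accessible to other users (path as used in the script below):

chmod 600 /root/sftppassword/sftppassword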
[root@hadoop03 ~]# systemctl start rsyncd
[root@hadoop03 ~]# systemctl enable rsyncd
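A quick way to confirm the daemon is reachable from the peer: querying an rsync daemon with an empty module name lists the exported modules, so this should print webgis:

rsync 192.168.5.5::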
[root@hadoop03 ~]# systemctl status rsyncd
[root@hadoop03 ~]# rsync -auz /data/sftp/sftpuser/* sftpuser@192.168.5.5::webgis --password-file=sftppassword/sftppassword
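Before automating the push, a dry run shows what rsync would transfer without writing anything on the receiver; -n (--dry-run) and --itemize-changes are standard flags:

# Preview only: nothing is modified on the destination
rsync -auzn --itemize-changes /data/sftp/sftpuser/ sftpuser@192.168.5.5::webgis --password-file=sftppassword/sftppassword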
The cluster happens to have ZooKeeper already, so it is enough to install a ZooKeeper client on each of the two machines.
(1) ZooKeeper is mainly used to make the two rsync jobs run half-duplex: only one direction synchronizes at a time. (Reportedly, syncing in both directions simultaneously can corrupt files. I haven't tested that, but I prefer orderly synchronization anyway.)
(2) ZooKeeper also guarantees that when the scheduler fires, the rsync job cannot be invoked by several processes at once. Make sure the rsync_signal znode is not empty: it must hold at least one ${curNode}_node_sync_finished value to bootstrap the cycle, as shown in the sketch below.
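A minimal bootstrap sketch for the znodes the scripts expect (paths and the zkCli.sh location are taken from the scripts below; run once on either node):

zk=/opt/apache-zookeeper-3.6.2-bin/bin/zkCli.sh
bash $zk create /keepalived ""                                         # parent path; errors harmlessly if it exists
bash $zk create /keepalived/rsync_signal "backup1_node_sync_finished"  # seed with <active node>_node_sync_finished
bash $zk create /keepalived/backup1 ""                                 # per-node sync counters
bash $zk create /keepalived/backup2 ""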
[root@hadoop03 hsi_ftp]# cat /sftp_data_rsync.sh
#!/bin/bash
#
# This script synchronizes the SFTP data.
# Prerequisites, created by hand in ZK:
#   (1) the znode /keepalived/rsync_signal
#   (2) the znode /keepalived/backup1
#   (3) the znode /keepalived/backup2
#
. /etc/profile
. /root/.bash_profile

curNode=backup2
anoNode=backup1
zkClient=/opt/apache-zookeeper-3.6.2-bin/bin/zkCli.sh
vip=192.168.5.16
user=sftpuser
srcPath=/data/sftp/$user/
destIp=192.168.5.5
module=webgis
pwdFile=/root/sftppassword/sftppassword
signalPath=/keepalived/rsync_signal
signal=$(bash $zkClient get $signalPath)
# Determines which keepalived node currently serves clients (holds the VIP)
services2user=$(ip addr | grep $vip | wc -l)
nodePath=/keepalived/$curNode
# After every 10 syncs the master yields one sync turn to the slave
maxTimes=10

# zkCli output is noisy, so the counter starts at 1373736000 and is
# recovered by grepping for its 1373736 prefix
getCurrentSyncTimes(){
    prefixNum=1373736
    startNum=1373736000
    maxNum=$(( startNum + maxTimes ))
    rsyncTimes=$(bash $zkClient get $nodePath | grep $prefixNum)
    if [[ $rsyncTimes == "" ]]; then
        rsyncTimes=$startNum
        bash $zkClient set $nodePath $startNum >/dev/null 2>&1
    elif [[ $rsyncTimes -ge $maxNum ]]; then
        rsyncTimes=$startNum
        bash $zkClient set $nodePath $startNum >/dev/null 2>&1
    else
        rsyncTimes=$(( rsyncTimes + 1 ))
        bash $zkClient set $nodePath $rsyncTimes >/dev/null 2>&1
    fi
    totalTimes=$(( rsyncTimes - startNum ))
    echo $totalTimes
}

# main
# No VIP here means this node has stopped serving clients: push whatever has
# not been synchronized yet over to the serving node, then hand over the turn.
if [[ $services2user -eq 0 ]]; then
    if [[ $signal =~ $curNode || $signal =~ "${anoNode}_node_stop" ]]; then
        echo "Final sync before going idle, signal: $signal"
        rsync -auz $srcPath $user@$destIp::$module --password-file=$pwdFile
        # Tell the other node it may start synchronizing
        bash $zkClient set $signalPath "${curNode}_node_stop" >/dev/null 2>&1
    fi
else
    echo "Sending data, signal: $signal"
    if [[ $signal =~ "${curNode}_node_sync_finished" || $signal =~ "${anoNode}_node_stop" ]]; then
        bash $zkClient set $signalPath "${curNode}_node_synchronizing" >/dev/null 2>&1
        rsync -auz $srcPath $user@$destIp::$module --password-file=$pwdFile
        curTotalTimes=$(getCurrentSyncTimes)
        echo "Sync number $curTotalTimes"
        if [[ $curTotalTimes -ge $maxTimes ]]; then
            echo "Yielding one turn to the other node"
            bash $zkClient set $signalPath "${curNode}_node_stop" >/dev/null 2>&1
        else
            bash $zkClient set $signalPath "${curNode}_node_sync_finished" >/dev/null 2>&1
        fi
    fi
fi
On node1 (hadoop05), /sftp_data_rsync.sh is the same script with only the node identity and the peer address swapped:

curNode=backup1
anoNode=backup2
destIp=192.168.5.3
Schedule the script on both nodes. cron's finest granularity is one minute, so six staggered entries give a roughly 10-second cadence:

[root@hadoop03 /]# crontab -e
* * * * * sh /sftp_data_rsync.sh >> /tmp/synclog.log
* * * * * sleep 10; sh /sftp_data_rsync.sh >> /tmp/synclog.log
* * * * * sleep 20; sh /sftp_data_rsync.sh >> /tmp/synclog.log
* * * * * sleep 30; sh /sftp_data_rsync.sh >> /tmp/synclog.log
* * * * * sleep 40; sh /sftp_data_rsync.sh >> /tmp/synclog.log
* * * * * sleep 50; sh /sftp_data_rsync.sh >> /tmp/synclog.log
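If a large transfer ever takes longer than 10 seconds, two invocations could still overlap on the same host between the ZooKeeper check and the signal update. A hypothetical hardening (flock ships with util-linux; the lock file path is an assumption) wraps each crontab entry:

# -n: skip this run instead of queueing if the previous run still holds the lock
* * * * * flock -n /tmp/sftp_rsync.lock sh /sftp_data_rsync.sh >> /tmp/synclog.log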
Check the handoff signals in the log. Below, node1 synchronizes repeatedly and then yields a turn with a _node_stop signal:

[root@hadoop05 hsi_ftp]# tail -n 400 /tmp/synclog.log | grep backup
backup1_node_stop
backup2_node_stop
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_sync_finished
backup1_node_stop
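You can also read the current signal straight from ZooKeeper at any time:

/opt/apache-zookeeper-3.6.2-bin/bin/zkCli.sh get /keepalived/rsync_signal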