14 Commits

Author SHA1 Message Date
Bastian 4bfd79e79e improved: retrieve source/destination cluster name for better insights 2023-07-13 15:18:24 +02:00
Bastian 6e8eb7ce2b fixed: preflight checks 2023-07-13 14:45:55 +02:00
Bastian be88cb4d40 fixed: perf_vm_stopped++ never counted. 2023-07-13 13:54:20 +02:00
Bastian 1343dc6b51 fixed: Correct target host now displayed in log message, Add downtime counter 2023-07-13 13:51:58 +02:00
Bastian 5ce325beec Strip ansi color codes from syslog and mail 2023-06-13 16:29:06 +02:00
Bastian b8d2386e69 Added Logging by mail functionality, added --mail parameter, added logfile handling 2023-06-13 16:13:23 +02:00
Bastian a5ea397d11 bump version 2023-06-13 14:33:20 +02:00
Bastian 36dabe9d79 fixed several linting issues 2023-06-13 14:21:06 +02:00
Bastian 284cfb6e76 Added features to README, minor wording changes 2023-04-26 15:41:13 +02:00
Bastian 5b7fd4986b Add preflight check: pool_exists 2023-03-23 16:14:04 +01:00
Bastian 41abd0429a Fix Regex to exclude cloud-init drive 2023-03-23 15:46:58 +01:00
Bastian 890567ad05 Remove vm from ha group before shutting down on migration 2023-03-23 14:10:55 +01:00
Bastian f5441f4c0b Sanitize cloud-init drives from the config 2023-03-22 15:22:36 +01:00
Bastian fb5b3a6d09 Merge pull request #1 from lephisto/feature-move: Add --migrate feature: near-live migrate between clusters 2023-03-22 14:42:05 +01:00
2 changed files with 191 additions and 74 deletions

README.md

@@ -86,6 +86,7 @@ It'll work according this scheme:
* Retention policy: (eg. keep x snapshots on the source and y snapshots in the destination cluster)
* Rewrites VM configurations so they match the new VMID and/or poolname on the destination
* Secure, encrypted transfer (SSH), so it's safe to mirror between datacenters without an additional VPN
* Near live-migrate: To move a VM from one cluster to another, make an initial copy and re-run with --migrate. This will shut down the VM on the source cluster and start it on the destination cluster.
## Installation of prerequisites
@@ -99,9 +100,9 @@ git clone https://github.com/lephisto/crossover/ /opt
Ensure that you can freely ssh from the node you plan to mirror _from_ to _all_ nodes in the destination cluster, as well as to localhost.
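A minimal sketch of how that trust could be established from the source node; the node names here are placeholders for your destination cluster, not taken from this repository:
```
# Hypothetical node names -- replace with the nodes of your destination cluster.
for node in pve04 pve05 pve06 localhost; do
    # Push the local root SSH key; run this once from the node you mirror from.
    ssh-copy-id root@"$node"
done
```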
## Examples
## Continuous replication between Clusters
Mirror VM to another Cluster:
Example 1: Mirror VM to another Cluster:
```
root@pve01:~/crossover# ./crossover mirror --vmid=all --prefixid=99 --excludevmids=101 --destination=pve04 --pool=data2 --overwrite --online
@@ -136,6 +137,7 @@ Full xmitted..........: 0 byte
Differential Bytes ...: 372.96 KiB
```
This example creates a mirror of VM 100 (in the source cluster) as VM 10100 (in the destination cluster), using the Ceph pool "data2" for storing all attached disks. It will keep 4 Ceph snapshots prior to the latest (5 in total) on the source and 8 snapshots on the remote cluster. It will keep the VM on the target cluster locked to avoid an accidental start (which would cause split-brain issues), and will do so even if the source VM is running.
The use case is that you might want to keep a cold-standby copy of a certain VM on another cluster. If you need to start it on the target cluster, you just have to unlock it with `qm unlock VMID` there.
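For instance, promoting the cold-standby copy from the example above (VMID 10100) could be as simple as running this on a node of the destination cluster:
```
qm unlock 10100   # lift the lock crossover left in place
qm start 10100    # bring the standby copy online
```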
@@ -146,6 +148,78 @@ Another usecase could be that you want to migrate a VM from one cluster to anoth
To minimize downtime and achieve a near-live migration from one cluster to another, it's recommended to do an initial sync of a VM from the source to the destination cluster. After that, run the job again with the --migrate switch added. This causes the source VM to be shut down prior to snapshot + transfer, and to be started on the destination cluster as soon as the incremental transfer is complete. Using --migrate will always try to start the VM on the destination cluster.
Example 2: Near-live migrate a VM from one cluster to another (run the initial replication first, which works online, then run again with --migrate to shut down the VM on the source, copy incrementally, and start it on the destination):
```
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online
ACTION: Onlinemirror
Start mirror 2023-04-26 15:02:24
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Transmitting Config for to destination pve04 VMID 100
VM 100 - locked 100 [rc:0] on source
VM 100 - locked 100 [rc:0] on destination
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426150224
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426150224
VM 100 - unlocked source VM 100 [rc:0]
VM 100 - F data/vm-100-disk-0@mirror-20230426150224: e:0:09:20 r: c:[36.6MiB/s] a:[36.6MiB/s] 20.0GiB [===============================>] 100%
VM 100 - created snapshot on 100 [rc:0]
VM 100 - Disk Summary: Took 560 Seconds to transfer 20.00 GiB in a full run
VM 100 - F data/vm-100-disk-1@mirror-20230426150224: e:0:00:40 r: c:[50.7MiB/s] a:[50.7MiB/s] 2.00GiB [===============================>] 100%
VM 100 - created snapshot on 100 [rc:0]
VM 100 - Disk Summary: Took 40 Seconds to transfer 22.00 GiB in a full run
VM 100 - Unlocking destination VM 100
Finnished mirror 2023-04-26 15:13:47
Job Summary: Bytes transferred 22.00 GiB for 2 Disks on 1 VMs in 00 hours 11 minutes 23 seconds
VM Freeze OK/failed.......: 1/0
RBD Snapshot OK/failed....: 2/0
RBD export-full OK/failed.: 2/0
RBD export-diff OK/failed.: 0/0
Full xmitted..............: 22.00 GiB
Differential Bytes .......: 0 Bytes
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online --migrate
ACTION: Onlinemirror
Start mirror 2023-04-26 15:22:35
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Migration requested, shutting down VM on pve01
VM 100 - locked 100 [rc:0] on source
VM 100 - locked 100 [rc:0] on destination
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426152235
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426152235
VM 100 - I data/vm-100-disk-0@mirror-20230426152235: e:0:00:03 c:[1.29MiB/s] a:[1.29MiB/s] 4.38MiB
VM 100 - Housekeeping: localhost data/vm-100-disk-0, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230323162532 (2930293s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426144911 (2076s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145632 (1637s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145859 (1492s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426150224 (1290s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-100-disk-0-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-0-data@mirror-20230426150224 (1293s) [rc:0]
VM 100 - Disk Summary: Took 4 Seconds to transfer 4.37 MiB in a incremental run
VM 100 - I data/vm-100-disk-1@mirror-20230426152235: e:0:00:00 c:[ 227 B/s] a:[ 227 B/s] 74.0 B
VM 100 - Housekeeping: localhost data/vm-100-disk-1, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230323162532 (2930315s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426144911 (2098s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145632 (1659s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145859 (1513s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426150224 (1310s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-100-disk-1-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-1-data@mirror-20230426150224 (1313s) [rc:0]
VM 100 - Disk Summary: Took 2 Seconds to transfer 4.37 MiB in a incremental run
VM 100 - Unlocking destination VM 100
VM 100 - Starting VM on pve01
Finnished mirror 2023-04-26 15:24:25
Job Summary: Bytes transferred 4.37 MiB for 2 Disks on 1 VMs in 00 hours 01 minutes 50 seconds
VM Freeze OK/failed.......: 0/0
RBD Snapshot OK/failed....: 2/0
RBD export-full OK/failed.: 0/0
RBD export-diff OK/failed.: 2/0
Full xmitted..............: 0 Bytes
Differential Bytes .......: 4.37 MiB
```
## Things to check
From the Proxmox VE hosts you want to back up, you need to be able to ssh passwordless to all other cluster hosts that may hold VMs or containers. This applies both to the source and to the destination cluster.
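A quick sketch to verify that passwordless access works before running a job; the hostnames are placeholders for every node of both clusters:
```
# Placeholder hostnames -- substitute the nodes of the source and destination cluster.
for node in pve01 pve02 pve03 pve04; do
    # BatchMode makes ssh fail instead of prompting, so a missing key shows up as FAIL.
    ssh -o BatchMode=yes root@"$node" true && echo "OK: $node" || echo "FAIL: $node"
done
```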

crossover

@@ -1,5 +1,8 @@
#!/bin/bash
# Cross Pool Migration and incremental replication Tool for Proxmox VMs using Ceph.
# Author: Bastian Mäuser <bma@netz.org>
LC_ALL="en_US.UTF-8"
source rainbow.sh
@@ -13,13 +16,11 @@ declare opt_influx_jobname=''
declare opt_influx_job_metrics='crossover_xmit'
declare opt_influx_summary_metrics='crossover_jobs'
# Cross Pool Migration and incremental replication Tool for Proxmox VMs using Ceph.
# Author: Bastian Mäuser <bma@netz.org>
declare -r VERSION=0.7
declare -r NAME=$(basename "$0")
name=$(basename "$0")
# readonly variables
declare -r NAME=$name
declare -r VERSION=0.8
declare -r PROGNAME=${NAME%.*}
declare -r PVE_DIR="/etc/pve"
declare -r PVE_NODES="$PVE_DIR/nodes"
declare -r QEMU='qemu-server'
@@ -27,14 +28,19 @@ declare -r QEMU_CONF_CLUSTER="$PVE_NODES/*/$QEMU"
declare -r EXT_CONF='.conf'
declare -r PVFORMAT_FULL='e:%t r:%e c:%r a:%a %b %p'
declare -r PVFORMAT_SNAP='e:%t c:%r a:%a %b'
logfile=$(mktemp)
declare -r LOG_FILE=$logfile
declare -r LOG_FILE=$(mktemp)
# associative global arrays
declare -A -g pvnode
declare -A -g dstpvnode
declare -A -g svmids
declare -A -g dvmids
declare -g scluster
declare -g dcluster
# global integers
declare -g -i perf_freeze_ok=0
declare -g -i perf_freeze_failed=0
declare -g -i perf_ss_ok=0
@@ -51,7 +57,10 @@ declare -g -i perf_bytes_total=0
declare -g -i perf_vm_running=0
declare -g -i perf_vm_stopped=0
declare -g -i perf_snaps_removed=0
declare -g -i perf_vm_total=0
declare -g -i perf_vm_ok=0
# commandline parameters
declare opt_destination
declare opt_vm_ids=''
declare opt_snapshot_prefix='mirror-'
@@ -75,6 +84,7 @@ declare -r redstconf='^\/etc\/pve\/nodes\/(.*)\/qemu-server\/([0-9]+).conf$'
declare -r recephimg='([a-zA-Z0-9]+)\:(.*)'
declare -r restripsnapshots='/^$/,$d'
declare -r redateex='^([0-9]{4})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})$'
declare -r restripansicolor='s/\x1b\[[0-9;]*m//g'
function usage(){
shift
@@ -117,9 +127,10 @@ Options:
--influxtoken Influx API token with write permission
--influxbucket Influx Bucket to write to (e.g. --influxbucket=telegraf/autogen)
--jobname Descriptive name for the job, used in Statistics
--mail Mail address to send report to, comma-separated (e.g. --mail=admin@test.com,admin2@test.com)
Switches:
--online Allow online Copy
--migrate Stop VM on Source Cluster before final Transfer and restart on destination Cluster
--migrate Stop VM on Source Cluster before final Transfer and start on destination Cluster
--nolock Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock Keep source VM locked on Transfer
--keep-dlock Keep VM locked after transfer on Destination
@@ -129,7 +140,6 @@ Switches:
Report bugs to <mephisto@mephis.to>
EOF
exit 1
}
function parse_opts(){
@@ -139,7 +149,7 @@ function parse_opts(){
local args
args=$(getopt \
--options '' \
--longoptions=vmid:,prefixid:,excludevmids:,destination:,pool:,keeplocal:,keepremote:,rewrite:,influxurl:,influxorg:,influxtoken:,influxbucket:,jobname:,online,migrate,nolock,keep-slock,keep-dlock,overwrite,dry-run,debug \
--longoptions=vmid:,prefixid:,excludevmids:,destination:,pool:,keeplocal:,keepremote:,rewrite:,influxurl:,influxorg:,influxtoken:,influxbucket:,jobname:,mail:,online,migrate,nolock,keep-slock,keep-dlock,overwrite,dry-run,debug,syslog \
--name "$PROGNAME" \
-- "$@") \
|| end_process 128
@@ -161,6 +171,7 @@ function parse_opts(){
--influxtoken) opt_influx_token=$2; shift 2;;
--influxbucket) opt_influx_bucket=$2; shift 2;;
--jobname) opt_influx_jobname=$2; shift 2;;
--mail) opt_addr_mail="$2"; shift 2;;
--online) opt_online=1; shift ;;
--migrate) opt_migrate=1; shift ;;
--dry-run) opt_dry_run=1; shift;;
@@ -169,6 +180,7 @@ function parse_opts(){
--keep-slock) opt_keepslock=1; shift;;
--keep-dlock) opt_keepdlock=1; shift;;
--overwrite) opt_overwrite=1; shift;;
--syslog) opt_syslog=1; shift;;
--) shift; break;;
*) break;;
esac
@@ -181,7 +193,7 @@ function parse_opts(){
log info "============================================"
log info "Proxmox VE Version:"
echowhite $(pveversion)
echowhite "$(pveversion)"
log info "============================================"
fi
@@ -302,20 +314,6 @@ function get_vm_ids(){
echo "$data"
}
function get_config_file(){
local file_config=''
if exist_file "$QEMU_CONF_CLUSTER/$vm_id$EXT_CONF"; then
file_config=$(ls $QEMU_CONF_CLUSTER/$vm_id$EXT_CONF)
else
log error "VM $vm_id - Unknown technology or VMID not found: $QEMU_CONF_CLUSTER/$vm_id$EXT_CONF"
end_process 128
fi
echo "$file_config"
}
function get_disks_from_config(){
local disks;
local file_config=$1
@@ -339,6 +337,7 @@ function log(){
local level=$1
shift 1
local message=$*
local syslog_msg=''
case $level in
debug)
@@ -350,28 +349,32 @@ function log(){
info)
echo -e "$message";
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$message"
echo -e "$message" | sed -e 's/\x1b\[[0-9;]*m//g' >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$syslog_msg"
;;
warn)
echo -n $(echoyellow "WARNING: ")
echo $(echowhite "$message") 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.warn "$message"
echo -n "$(echoyellow 'WARNING: ')"
echowhite "$message" 1>&2
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.warn "$syslog_msg"
;;
error)
echo -n $(echored "ERROR: ")
echo $(echowhite "$message") 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.err "$message"
echo -n "$(echored 'ERROR: ')"
echowhite "$message" 1>&2
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.err "$syslog_msg"
;;
*)
echo "$message" 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$message"
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$syslog_msg"
;;
esac
}
@@ -403,6 +406,8 @@ function mirror() {
local -i endjob
local -i vmcount=0
local -i diskcount=0
local -i startdowntime
local -i enddowntime
local disp_perf_freeze_failed
local disp_perf_ss_failed
@@ -427,32 +432,41 @@ function mirror() {
end_process 1
fi
scluster=$(grep cluster_name /etc/pve/corosync.conf | cut -d " " -f 4)
dcluster=$(ssh "$opt_destination" grep cluster_name /etc/pve/corosync.conf | cut -d " " -f 4)
map_source_to_destination_vmid
map_vmids_to_host
map_vmids_to_dsthost "$opt_destination"
if [ "$(check_pool_exist "$opt_pool")" -eq 0 ]; then
log error "Preflight check: Destination RBD-Pool $opt_pool does not exist."
end_process 255
fi
for vm_id in $svmids; do
file_config="$PVE_NODES/${pvnode[$vm_id]}/$QEMU/$vm_id.conf"
if ! exist_file "$file_config"; then
log error "VM $vm_id - Preflight check: VM $vm_id does not exist on source cluster [$scluster] - skipping to next VM."
continue
fi
(( vmcount++ ))
local file_config; file_config=$(get_config_file)
[ -z "$file_config" ] && continue
local disk=''
dvmid=${dvmids[$vm_id]}
vmname=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf | sed -e ''$restripsnapshots'' | grep "name\:" | cut -d' ' -f 2)
log info "VM $vm_id - Starting mirror for $(echowhite "$vmname")"
srcvmgenid=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf | sed -e ''$restripsnapshots'' | grep vmgenid | sed -r -e 's/^vmgenid:\s(.*)/\1/')
dstvmgenid=$(ssh $opt_destination cat $PVE_NODES/"${dstpvnode[$dvmid]}"/$QEMU/"$dvmid".conf 2>/dev/null | grep vmgenid | sed -e ''$restripsnapshots'' | sed -r -e 's/^vmgenid:\s(.*)/\1/')
log info "VM $vm_id - Checking for VM $dvmid on Destination Host $opt_destination $QEMU_CONF_CLUSTER"
dstvmgenid=$(ssh "$opt_destination" cat $PVE_NODES/"${dstpvnode[$dvmid]}"/$QEMU/"$dvmid".conf 2>/dev/null | grep vmgenid | sed -e ''$restripsnapshots'' | sed -r -e 's/^vmgenid:\s(.*)/\1/')
log info "VM $vm_id - Checking for VM $dvmid on destination cluster $opt_destination $QEMU_CONF_CLUSTER"
log debug "DVMID:$dvmid srcvmgenid:$srcvmgenid dstvmgenid:$dstvmgenid"
conf_on_destination=$(ssh $opt_destination "ls -d $QEMU_CONF_CLUSTER/$dvmid$EXT_CONF 2>/dev/null")
conf_on_destination=$(ssh "$opt_destination" "ls -d $QEMU_CONF_CLUSTER/$dvmid$EXT_CONF 2>/dev/null")
[[ "$conf_on_destination" =~ $redstconf ]]
host_on_destination=${BASH_REMATCH[1]}
if [ $host_on_destination ]; then
dststatus=$(ssh root@${dstpvnode[$dvmid]} qm status $dvmid|cut -d' ' -f 2)
if [ $dststatus == "running" ]; then
log error "VM is running on Destination. bailing out"
log error "VM is running on Destination Cluster [$dcluster]. bailing out"
end_process 255
fi
fi
@@ -473,25 +487,30 @@ function mirror() {
map_vmids_to_dsthost "$opt_destination"
fi
#--move so we need to shutdown?
#--move so we need to shutdown and remove from ha group?
if [ $opt_migrate -eq 1 ]; then
log info "VM $vm_id - Shutting down VM on ${pvnode[$vm_id]}"
do_run "ssh root@"${pvnode[$vm_id]}" qm shutdown "$vm_id >/dev/null
log info "VM $vm_id - Migration requested, shutting down VM on ${pvnode[$vm_id]}"
if [ "$(get_ha_status "$vm_id")" == "started" ]; then
log info "VM $vm_id - remove from HA"
do_run "ha-manager remove $vm_id"
fi
do_run "ssh root@${pvnode[$vm_id]} qm shutdown $vm_id >/dev/null"
startdowntime=$(date +%s)
fi
#Lock on source + destination
if [ $opt_lock -eq 1 ]; then
do_run "ssh root@"${pvnode[$vm_id]}" qm set "$vm_id" --lock backup" >/dev/null
log info "VM $vm_id - locked $vm_id [rc:$?]"
do_run "ssh root@"${dstpvnode[$dvmid]}" qm set "$dvmid" --lock backup" >/dev/null
log info "VM $dvmid - locked $dvmid [rc:$?]"
do_run "ssh root@""${pvnode[$vm_id]}"" qm set ""$vm_id"" --lock backup" >/dev/null
log info "VM $vm_id - locked $vm_id [rc:$?] on source"
do_run "ssh root@""${dstpvnode[$dvmid]}"" qm set ""$dvmid"" --lock backup" >/dev/null
log info "VM $dvmid - locked $dvmid [rc:$?] on destination"
fi
#Freeze fs only if no migration running
if [ $opt_migrate -eq 0 ]; then
vm_freeze "$vm_id" "${pvnode[$vm_id]}" >/dev/null
freezerc=$?
if [ $freezerc -gt 0 ]; then
log error "VM $vm_id - QEMU-Guest could not fsfreeze on guest."
log warn "VM $vm_id - QEMU-Guest could not fsfreeze on guest."
(( perf_freeze_failed++ ))
else
(( perf_freeze_ok++ ))
@@ -502,7 +521,7 @@ function mirror() {
create_snapshot "$src_image_spec@$opt_snapshot_prefix$timestamp" 2>/dev/null
ssrc=$?
if [ $ssrc -gt 0 ]; then
log error "VM $vm_id - rbd snap failed."
log warn "VM $vm_id - rbd snap failed."
(( perf_ss_failed++ ))
else
(( perf_ss_ok++ ))
@@ -619,8 +638,10 @@ function mirror() {
fi
#--migrate so start on destination?
if [ $opt_migrate -eq 1 ]; then
log info "VM $dvmid - Starting VM on ${pvnode[$vm_id]}"
do_run "ssh root@"${dstpvnode[$dvmid]}" qm start "$dvmid >/dev/null
log info "VM $dvmid - Starting VM on node ${dstpvnode[$dvmid]} in cluster [$dcluster]"
do_run "ssh root@""${dstpvnode[$dvmid]}"" qm start "$dvmid >/dev/null
enddowntime=$(date +%s)
log info "VM $dvmid - Downtime: $(( enddowntime - startdowntime )) Seconds"
fi
done
@@ -645,6 +666,8 @@ function mirror() {
cmd="curl --request POST \"$opt_influx_api_url/v2/write?org=$opt_influx_api_org&bucket=$opt_influx_bucket&precision=ns\" --header \"Authorization: Token $opt_influx_token\" --header \"Content-Type: text/plain; charset=utf-8\" --header \"Accept: application/json\" --data-binary '$influxlp'"
do_run "$cmd"
fi
(( perf_vm_ok++ ))
end_process 0
}
function do_housekeeping(){
@@ -701,8 +724,8 @@ function vm_freeze() {
status=$(ssh root@"$fhost" qm status "$fvm"|cut -d' ' -f 2)
if ! [[ "$status" == "running" ]]; then
log info "VM $fvm - Not running, skipping fsfreeze-freeze"
return
(( perf_vm_stopped++ ))
return
else
(( perf_vm_running++ ))
fi
@@ -742,7 +765,7 @@ function rewriteconfig(){
else
sedcmd='sed -e /^$/,$d'
fi
cat "$oldconfig" | sed -r -e "s/^(virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+).*,(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6-\3,\7/g" | $sedcmd | sed -e '/^$/,$d' | grep -v "^parent:\s.*$" | ssh "$dst" "cat - >$newconfig"
cat "$oldconfig" | sed -r -e "s/^(virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+).*,(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6-\3,\7/g" | $sedcmd | sed -e '/^$/,$d' | sed -e '/ide[0-9]:.*-cloudinit,media=cdrom.*/d' | grep -v "^parent:\s.*$" | ssh "$dst" "cat - >$newconfig"
}
function checkvmid(){
@@ -773,23 +796,25 @@ function do_run(){
function end_process(){
local -i rc=$1;
# if ! [[ -z "$startts" && -z "$endts" ]]; then
# local -i runtime=$(expr $endts - $startts)
# local -i bps=$(expr $bytecount/$runtime)
# fi
# local subject="Ceph [VM:$vmok/$vmtotal SS:$snapshotok/$snapshottotal EX:$exportok/$exporttotal] [$(bytesToHuman "$bytecount")@$(bytesToHuman "$bps")/s]"
# [ $rc != 0 ] && subject="$subject [ERROR]"
local -i runtime
local -i bps
local -i ss_total
local subject
if ! [[ -z "$startjob" || -z "$endjob" ]]; then
runtime=$(expr $endjob - $startjob)
bps=$(expr $perf_bytes_total/$runtime)
fi
ss_total=$(expr $perf_ss_ok + $perf_ss_failed)
subject="Crossover [VM:$perf_vm_ok/$vmcount SS:$perf_ss_ok/$ss_total]"
[ $rc != 0 ] && subject="[ERROR] $subject" || subject="[OK] $subject"
#send email
# local mail;
# local mailhead="Backup $imgcount Images in $vmcount VMs (Bytes: $bytecount)"
# for mail in $(echo "$opt_addr_mail" | tr "," "\n"); do
# do_run "cat '$LOG_FILE' | mail -s '$subject' '$mail'"
# done
local mail;
for mail in $(echo "$opt_addr_mail" | tr "," "\n"); do
do_run "cat '$LOG_FILE' | mail -s '$subject' '$mail'"
done
#remove log
# rm "$LOG_FILE"
rm "$LOG_FILE"
exit "$rc";
}
@@ -806,6 +831,24 @@ function get_image_spec(){
echo "$image_spec"
}
function get_ha_status() {
local havmid="$1"
ha_status=$(ha-manager status| grep vm:"$havmid" | cut -d " " -f 4| sed 's/.$//')
echo "$ha_status"
}
function check_pool_exist() {
local poolname="$1"
local -i exists=255
pool_status=$(ssh $opt_destination pvesm status|grep rbd|cut -d " " -f 1|grep $poolname)
if [ "$pool_status" == "$poolname" ]; then
exists=1
else
exists=0
fi
echo $exists
}
function main(){
[ $# = 0 ] && usage;