mirror of
https://github.com/lephisto/crossover.git
synced 2025-12-06 04:09:20 +01:00
Compare commits
6 Commits
feature-mo
...
284cfb6e76
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
284cfb6e76 | ||
|
|
5b7fd4986b | ||
|
|
41abd0429a | ||
|
|
890567ad05 | ||
|
|
f5441f4c0b | ||
|
|
fb5b3a6d09 |
78
README.md
78
README.md
@@ -86,6 +86,7 @@ It'll work according this scheme:
|
||||
* Retention policy: (eg. keep x snapshots on the source and y snapshots in the destination cluster)
|
||||
* Rewrites VM configurations so they match the new VMID and/or poolname on the destination
|
||||
* Secures an encrypted transfer (SSH), so it's safe to mirror between datacenters without an additional VPN
|
||||
* Near live-migrate: To move a VM from one Cluster to another, make an initial copy and re-run with --migrate. This will shut down the VM on the source cluster and start it on the destination cluster.
|
||||
|
||||
## Installation of prerequisites
|
||||
|
||||
@@ -99,9 +100,9 @@ git clone https://github.com/lephisto/crossover/ /opt
|
||||
|
||||
Ensure that you can freely ssh from the Node you plan to mirror _from_ to _all_ nodes in the destination cluster, as well as localhost.
|
||||
|
||||
## Examples
|
||||
## Continuous replication between Clusters
|
||||
|
||||
Mirror VM to another Cluster:
|
||||
Example 1: Mirror VM to another Cluster:
|
||||
|
||||
```
|
||||
root@pve01:~/crossover# ./crossover mirror --vmid=all --prefixid=99 --excludevmids=101 --destination=pve04 --pool=data2 --overwrite --online
|
||||
@@ -136,6 +137,7 @@ Full xmitted..........: 0 byte
|
||||
Differential Bytes ...: 372.96 KiB
|
||||
|
||||
```
|
||||
|
||||
This example creates a mirror of VM 100 (in the source cluster) as VM 10100 (in the destination cluster) using the ceph pool "data2" for storing all attached disks. It will keep 4 Ceph snapshots prior to the latest (5 in total) and 8 snapshots on the remote cluster. It will keep the VM on the target Cluster locked to avoid an accidental start (thus causing split-brain issues), and will do it even if the source VM is running.
|
||||
|
||||
The use case is that you might want to keep a cold-standby copy of a certain VM on another Cluster. If you need to start it on the target cluster you just have to unlock it with `qm unlock VMID` there.
|
||||
@@ -146,6 +148,78 @@ Another usecase could be that you want to migrate a VM from one cluster to anoth
|
||||
|
||||
To minimize downtime and achieve a near-live migration from one Cluster to another it's recommended to do an initial sync of a VM from the source to the destination cluster. After that, run the job again, and add the --migrate switch. This causes the source VM to be shut down prior to snapshot + transfer, and be restarted on the destination cluster as soon as the incremental transfer is complete. Using --migrate will always try to start the VM on the destination cluster.
|
||||
|
||||
Example 2: Near-live migrate VM from one cluster to another (Run initial replication first, which works online, then run with --migrate to shut down on source, incrementally copy and start on destination):
|
||||
|
||||
```
|
||||
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online
|
||||
ACTION: Onlinemirror
|
||||
Start mirror 2023-04-26 15:02:24
|
||||
VM 100 - Starting mirror for testubuntu
|
||||
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
|
||||
VM 100 - Transmitting Config for to destination pve04 VMID 100
|
||||
VM 100 - locked 100 [rc:0] on source
|
||||
VM 100 - locked 100 [rc:0] on destination
|
||||
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426150224
|
||||
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426150224
|
||||
VM 100 - unlocked source VM 100 [rc:0]
|
||||
VM 100 - F data/vm-100-disk-0@mirror-20230426150224: e:0:09:20 r: c:[36.6MiB/s] a:[36.6MiB/s] 20.0GiB [===============================>] 100%
|
||||
VM 100 - created snapshot on 100 [rc:0]
|
||||
VM 100 - Disk Summary: Took 560 Seconds to transfer 20.00 GiB in a full run
|
||||
VM 100 - F data/vm-100-disk-1@mirror-20230426150224: e:0:00:40 r: c:[50.7MiB/s] a:[50.7MiB/s] 2.00GiB [===============================>] 100%
|
||||
VM 100 - created snapshot on 100 [rc:0]
|
||||
VM 100 - Disk Summary: Took 40 Seconds to transfer 22.00 GiB in a full run
|
||||
VM 100 - Unlocking destination VM 100
|
||||
Finnished mirror 2023-04-26 15:13:47
|
||||
Job Summary: Bytes transferred 22.00 GiB for 2 Disks on 1 VMs in 00 hours 11 minutes 23 seconds
|
||||
VM Freeze OK/failed.......: 1/0
|
||||
RBD Snapshot OK/failed....: 2/0
|
||||
RBD export-full OK/failed.: 2/0
|
||||
RBD export-diff OK/failed.: 0/0
|
||||
Full xmitted..............: 22.00 GiB
|
||||
Differential Bytes .......: 0 Bytes
|
||||
|
||||
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online --migrate
|
||||
ACTION: Onlinemirror
|
||||
Start mirror 2023-04-26 15:22:35
|
||||
VM 100 - Starting mirror for testubuntu
|
||||
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
|
||||
VM 100 - Migration requested, shutting down VM on pve01
|
||||
VM 100 - locked 100 [rc:0] on source
|
||||
VM 100 - locked 100 [rc:0] on destination
|
||||
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426152235
|
||||
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426152235
|
||||
VM 100 - I data/vm-100-disk-0@mirror-20230426152235: e:0:00:03 c:[1.29MiB/s] a:[1.29MiB/s] 4.38MiB
|
||||
VM 100 - Housekeeping: localhost data/vm-100-disk-0, keeping Snapshots for 0s
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230323162532 (2930293s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426144911 (2076s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145632 (1637s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145859 (1492s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426150224 (1290s) [rc:0]
|
||||
VM 100 - Housekeeping: pve04 data2/vm-100-disk-0-data, keeping Snapshots for 0s
|
||||
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-0-data@mirror-20230426150224 (1293s) [rc:0]
|
||||
VM 100 - Disk Summary: Took 4 Seconds to transfer 4.37 MiB in a incremental run
|
||||
VM 100 - I data/vm-100-disk-1@mirror-20230426152235: e:0:00:00 c:[ 227 B/s] a:[ 227 B/s] 74.0 B
|
||||
VM 100 - Housekeeping: localhost data/vm-100-disk-1, keeping Snapshots for 0s
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230323162532 (2930315s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426144911 (2098s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145632 (1659s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145859 (1513s) [rc:0]
|
||||
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426150224 (1310s) [rc:0]
|
||||
VM 100 - Housekeeping: pve04 data2/vm-100-disk-1-data, keeping Snapshots for 0s
|
||||
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-1-data@mirror-20230426150224 (1313s) [rc:0]
|
||||
VM 100 - Disk Summary: Took 2 Seconds to transfer 4.37 MiB in a incremental run
|
||||
VM 100 - Unlocking destination VM 100
|
||||
VM 100 - Starting VM on pve01
|
||||
Finnished mirror 2023-04-26 15:24:25
|
||||
Job Summary: Bytes transferred 4.37 MiB for 2 Disks on 1 VMs in 00 hours 01 minutes 50 seconds
|
||||
VM Freeze OK/failed.......: 0/0
|
||||
RBD Snapshot OK/failed....: 2/0
|
||||
RBD export-full OK/failed.: 0/0
|
||||
RBD export-diff OK/failed.: 2/0
|
||||
Full xmitted..............: 0 Bytes
|
||||
Differential Bytes .......: 4.37 MiB
|
||||
```
|
||||
|
||||
## Things to check
|
||||
|
||||
From the Proxmox VE hosts you want to back up, you need to be able to ssh passwordless to all other cluster hosts that may hold VMs or Containers. This goes for the source and for the destination Cluster.
|
||||
|
||||
41
crossover
41
crossover
@@ -433,6 +433,11 @@ function mirror() {
|
||||
map_vmids_to_host
|
||||
map_vmids_to_dsthost "$opt_destination"
|
||||
|
||||
if [ $(check_pool_exist "$opt_pool") -eq 0 ]; then
|
||||
log error "Preflight check: Destination RBD-Pool $opt_pool does not exist."
|
||||
end_process 255
|
||||
fi
|
||||
|
||||
for vm_id in $svmids; do
|
||||
(( vmcount++ ))
|
||||
local file_config; file_config=$(get_config_file)
|
||||
@@ -443,7 +448,7 @@ function mirror() {
|
||||
log info "VM $vm_id - Starting mirror for $(echowhite "$vmname")"
|
||||
srcvmgenid=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf | sed -e ''$restripsnapshots'' | grep vmgenid | sed -r -e 's/^vmgenid:\s(.*)/\1/')
|
||||
dstvmgenid=$(ssh $opt_destination cat $PVE_NODES/"${dstpvnode[$dvmid]}"/$QEMU/"$dvmid".conf 2>/dev/null | grep vmgenid | sed -e ''$restripsnapshots'' | sed -r -e 's/^vmgenid:\s(.*)/\1/')
|
||||
log info "VM $vm_id - Checking for VM $dvmid on Destination Host $opt_destination $QEMU_CONF_CLUSTER"
|
||||
log info "VM $vm_id - Checking for VM $dvmid on destination cluster $opt_destination $QEMU_CONF_CLUSTER"
|
||||
log debug "DVMID:$dvmid srcvmgenid:$srcvmgenid dstvmgenid:$dstvmgenid"
|
||||
conf_on_destination=$(ssh $opt_destination "ls -d $QEMU_CONF_CLUSTER/$dvmid$EXT_CONF 2>/dev/null")
|
||||
[[ "$conf_on_destination" =~ $redstconf ]]
|
||||
@@ -473,18 +478,22 @@ function mirror() {
|
||||
map_vmids_to_dsthost "$opt_destination"
|
||||
fi
|
||||
|
||||
#--move so we need to shutdown?
|
||||
#--move so we need to shutdown and remove from ha group?
|
||||
if [ $opt_migrate -eq 1 ]; then
|
||||
log info "VM $vm_id - Shutting down VM on ${pvnode[$vm_id]}"
|
||||
do_run "ssh root@"${pvnode[$vm_id]}" qm shutdown "$vm_id >/dev/null
|
||||
log info "VM $vm_id - Migration requested, shutting down VM on ${pvnode[$vm_id]}"
|
||||
if [ "$(get_ha_status "$vm_id")" == "started" ]; then
|
||||
log info "VM $vm_id - remove from HA"
|
||||
do_run "ha-manager remove $vm_id"
|
||||
fi
|
||||
do_run "ssh root@${pvnode[$vm_id]} qm shutdown $vm_id >/dev/null"
|
||||
fi
|
||||
|
||||
#Lock on source + destination
|
||||
if [ $opt_lock -eq 1 ]; then
|
||||
do_run "ssh root@"${pvnode[$vm_id]}" qm set "$vm_id" --lock backup" >/dev/null
|
||||
log info "VM $vm_id - locked $vm_id [rc:$?]"
|
||||
log info "VM $vm_id - locked $vm_id [rc:$?] on source"
|
||||
do_run "ssh root@"${dstpvnode[$dvmid]}" qm set "$dvmid" --lock backup" >/dev/null
|
||||
log info "VM $dvmid - locked $dvmid [rc:$?]"
|
||||
log info "VM $dvmid - locked $dvmid [rc:$?] on destination"
|
||||
fi
|
||||
#Freeze fs only if no migration running
|
||||
if [ $opt_migrate -eq 0 ]; then
|
||||
@@ -742,7 +751,7 @@ function rewriteconfig(){
|
||||
else
|
||||
sedcmd='sed -e /^$/,$d'
|
||||
fi
|
||||
cat "$oldconfig" | sed -r -e "s/^(virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+).*,(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6-\3,\7/g" | $sedcmd | sed -e '/^$/,$d' | grep -v "^parent:\s.*$" | ssh "$dst" "cat - >$newconfig"
|
||||
cat "$oldconfig" | sed -r -e "s/^(virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+).*,(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6-\3,\7/g" | $sedcmd | sed -e '/^$/,$d' | sed -e '/ide[0-9]:.*-cloudinit,media=cdrom.*/d' | grep -v "^parent:\s.*$" | ssh "$dst" "cat - >$newconfig"
|
||||
}
|
||||
|
||||
function checkvmid(){
|
||||
@@ -806,6 +815,24 @@ function get_image_spec(){
|
||||
echo "$image_spec"
|
||||
}
|
||||
|
||||
function get_ha_status() {
|
||||
local havmid="$1"
|
||||
ha_status=$(ha-manager status| grep vm:"$havmid" | cut -d " " -f 4| sed 's/.$//')
|
||||
echo "$ha_status"
|
||||
}
|
||||
|
||||
function check_pool_exist() {
|
||||
local poolname="$1"
|
||||
local -i exists=255
|
||||
pool_status=$(ssh $opt_destination pvesm status|grep rbd|cut -d " " -f 1|grep $poolname)
|
||||
if [ "$pool_status" == "$poolname" ]; then
|
||||
exists=1
|
||||
else
|
||||
exists=0
|
||||
fi
|
||||
echo $exists
|
||||
}
|
||||
|
||||
function main(){
|
||||
[ $# = 0 ] && usage;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user