43 Commits

Author SHA1 Message Date
Bastian
5ce325beec Strip ansi color codes from syslog and mail 2023-06-13 16:29:06 +02:00
Bastian
b8d2386e69 Added Logging by mail functionality, added --mail parameter, added logfilehandling 2023-06-13 16:13:23 +02:00
Bastian
a5ea397d11 bump version 2023-06-13 14:33:20 +02:00
Bastian
36dabe9d79 fixed several linting issues 2023-06-13 14:21:06 +02:00
Bastian
284cfb6e76 Added features to README, minor wording changes 2023-04-26 15:41:13 +02:00
Bastian
5b7fd4986b Add preflight check: pool_exists 2023-03-23 16:14:04 +01:00
Bastian
41abd0429a Fix Regex to exclude cloud-init drive 2023-03-23 15:46:58 +01:00
Bastian
890567ad05 Remove vm from ha group before shutting down on migration 2023-03-23 14:10:55 +01:00
Bastian
f5441f4c0b Sanitize cloud-init drives from the config 2023-03-22 15:22:36 +01:00
Bastian
fb5b3a6d09 Merge pull request #1 from lephisto/feature-move
Add --migrate feature: near-live migrate between clusters
2023-03-22 14:42:05 +01:00
Bastian
5bf37e886c Add --migrate feature: near-live migrate between clusters 2023-03-22 14:40:01 +01:00
Bastian
010f04c412 make --vmids=num vorking with --prefixids, bump version 2022-12-06 14:20:28 +01:00
Bastian
13245fdf5e Add --jobname as mandatory parameter 2022-11-16 12:47:47 +01:00
Bastian
ae641a3927 Add colors to central logging function 2022-11-15 16:21:36 +01:00
Bastian
2f3fa5a39f Add some decent coloring 2022-11-14 16:34:55 +01:00
Bastian
7f64f6abc8 Add Helper for bash colors 2022-11-14 15:43:14 +01:00
Bastian
99f3ced23c Fix Total Byte CCounter 2022-11-04 22:52:10 +01:00
Bastian
d72e66a230 exec crossover 2022-11-04 22:38:37 +01:00
Bastian
88ccbc914f Fix locale error in human_readable() function 2022-11-04 22:37:40 +01:00
Bastian
fa953c6dbc restore crossover 2022-11-01 19:52:25 +01:00
Bastian
b60c086071 Fix counting issue, Readme 2022-11-01 19:26:56 +01:00
Bastian
21301838de Improve Performance metrics and logging 2022-11-01 17:07:08 +01:00
Bastian
3cbe62f246 Add .gitignore 2022-11-01 16:47:22 +01:00
Bastian
ccd957c6ed logging improvements, human readable stuff 2022-10-28 21:29:41 +02:00
Bastian
ce5660c186 Bump Version 2022-10-28 14:54:45 +02:00
Bastian
aecea23afd Added Documentation for updated housekeeping rules 2022-10-28 14:53:56 +02:00
Bastian
812253a7e0 Rewrite of housekeeping, timebased retention introduced 2022-10-28 13:53:07 +02:00
Bastian
f20e4c4f63 Added performance metrics, some message polishing 2022-10-28 11:35:36 +02:00
Bastian
1883342180 Unlock Source VM directly after fsfreeze 2022-10-28 10:46:06 +02:00
Bastian
ccbc40511c Added Metric InfluxDB to InfluxDB 2022-10-27 16:55:47 +02:00
Bastian
0b0bdaec33 minor changes and fixes 2022-10-27 16:51:59 +02:00
Bastian
11261d6473 remove junk 2022-10-27 13:32:25 +02:00
Bastian
4cab0a5c26 Adjust Documentation 2022-10-26 17:03:35 +02:00
Bastian
c229fbf21e Added option to regex-rewrite the VM config 2022-10-26 17:02:25 +02:00
Bastian
6c8b6d99ca Added functionality to set vmids=all to mirror all VMs on source 2022-10-26 13:47:27 +02:00
Bastian
512f7d664f Fix regression where multiple existing snaps on source prevented full copies to run. 2022-10-26 13:05:42 +02:00
Bastian
bc2f6f34fc some fix to snapshotsize calculation 2022-10-24 16:39:28 +02:00
Bastian
fd190a3622 Some magic to get a proper value for snapshot size 2022-10-24 15:10:30 +02:00
Bastian
9b29489dc8 Added estimation of Transfer times and amounts of data 2022-10-24 13:37:37 +02:00
Bastian
886512bd41 Fix confusion of PVE Pool name vs Ceph Pool name 2022-10-23 02:02:25 +02:00
Bastian
35110daf35 Fix cornercase where disks could be the same on different source pools 2022-10-22 20:10:50 +02:00
Bastian
0cee976786 Add lookup, in case pve poolname doesn't match Ceph poolname 2022-10-21 19:38:07 +02:00
Bastian
aadf7e656c Add stuff to readme 2022-10-21 18:18:24 +02:00
4 changed files with 639 additions and 141 deletions

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
eve4pve-barc

190
README.md

@@ -11,71 +11,86 @@ ______
| --| _| . |_ -|_ -| . | | | -_| _|
|_____|_| |___|___|___|___|\_/|___|_|
Cross Pool (live) replication and near-live migration forProxmox VE
Cross Pool (live) replication and near-live migration for Proxmox VE
Usage:
crossover <COMMAND> [ARGS] [OPTIONS]
crossover help
crossover version
crossover mirror --vmid=<string> --destination=<destionationhost> --pool=<targetpool> --keeplocal=n --keepremote=n
crossover mirror --vmid=<string> --destination=<destinationhost> --pool=<targetpool> --keeplocal=[n][d|s] --keepremote=[n][d|s]
Commands:
version Show version program
help Show help program
mirror Replicate a stopped VM to another Cluster (full clone)
Options:
--vmid The source+target ID of the VM/CT, comma separated (eg. --vmid=100:100,101:101),
--destination 'Target PVE Host in target pool. e.g. --destination=pve04
--pool 'Ceph pool name in target pool. e.g. --pool=data
--keeplocal 'How many additional Snapshots to keep locally. e.g. --keeplocal=2
--keepremote 'How many additional Snapshots to keep remote. e.g. --keepremote=2
--online 'Allow online Copy
--nolock 'Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock 'Keep source VM locked on Transfer
--keep-dlock 'Keep VM locked after transfer on Destination
--overwrite 'Overwrite Destination
--protect 'Protect Ceph Snapshots
--debug 'Show Debug Output
--vmid The source+target ID of the VM, comma separated (eg. --vmid=100:100,101:101)
(The possibility to specify a different Target VMID is to not interfere with VMIDs on the
target cluster, or mark mirrored VMs on the destination)
--prefixid Prefix for VMID's on target System [optional]
--excludevmids Exclude VM IDs when using --vmid=all
--destination Target PVE Host in target pool. e.g. --destination=pve04
--pool Ceph pool name in target pool. e.g. --pool=data
--keeplocal How many additional Snapshots to keep locally, specified in seconds or days. e.g. --keeplocal=2d
--keepremote How many additional Snapshots to keep remote, specified in seconds or days. e.g. --keepremote=7d
--rewrite PCRE Regex to rewrite the Config Files (eg. --rewrite='s/(net0:)(.*)tag=([0-9]+)/\1\2tag=1/g' would
change the VLAN tag on net0 to 1, e.g. from 5 to 1).
--influxurl Influx API url (e.g. --influxurl=https://your-influxserver.com/api/)
--influxtoken Influx API token with write permission
--influxbucket Influx Bucket to write to (e.g. --influxbucket=telegraf/autogen)
Switches:
--online Allow online Copy
--nolock Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock Keep source VM locked on Transfer
--keep-dlock Keep VM locked after transfer on Destination
--overwrite Overwrite Destination
--protect Protect Ceph Snapshots
--debug Show Debug Output
Report bugs to the Github repo at https://github.com/lephisto/crossover/
```
## Introduction
When working with hyperconverges Proxmox HA Clusters you sometimes need to get VMs migrated
to another cluster, or have a cold-standby copy of a VM ready to start there. Crossover implements
functions that enable you to do the following:
When working with hyperconverged Proxmox HA Clusters you sometimes need to get VMs migrated to another cluster, or have a cold-standby copy of a VM ready to start there in case your main Datacenter goes boom. Crossover implements functionality that enables you to do the following:
- Transfer a non-running VM to another Cluster
- Transfer a running VM to another Cluster
- Continuously update a previously transferred VM in another Cluster with incremental snapshots
Backup And Restore Ceph for Proxmox VE with retention. This solution implements
a snapshotbackup for Ceph cluster exporting to specific directory. The mechanism using
Ceph snapshot, export and export differential. In backup export image and config
file VM/CT.
Currently this only works with Ceph based storage backends, since the incremental logic heavily
relies on Rados block device features.
It'll work according to this scheme:
```
.:::::::::. .:::::::::.
|Cluster-A| |Cluster-B|
| | | |
| _______ | rbd export-diff [..] | ssh pve04 | rbd import-diff [..] | _______ |
| pve01 -|-----------------------------------------------------------|->pve04 |
| _______ | | _______ |
| pve02 | | pve05 |
| _______ | | _______ |
| pve03 | | pve06 |
| _______ | | _______ |
| | | |
|:::::::::| |:::::::::|
```
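Under the hood, the arrows in this diagram are plain `rbd export`/`export-diff` pipes over SSH, which is what crossover automates per disk. A rough sketch of the manual equivalent, using the pool, image and snapshot names from the examples further down (placeholders, not defaults):
```
# Initial run: full export of the snapshot, imported on the destination cluster,
# followed by creating the same-named snapshot there as the base for future diffs
rbd export --no-progress data/vm-100-disk-0@mirror-20221101192144 - \
  | ssh pve04 rbd import --image-format 2 - data2/vm-99100-disk-0-data
ssh pve04 rbd snap create data2/vm-99100-disk-0-data@mirror-20221101192144

# Subsequent runs: only the delta between the last common snapshot and the new one is shipped
rbd export-diff --no-progress --from-snap mirror-20221101192144 \
  data/vm-100-disk-0@mirror-20221101193000 - \
  | ssh pve04 rbd import-diff - data2/vm-99100-disk-0-data
```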
## Main features
* Currently only for KVM. I might add LXC support when I need to.
* Can keep multiple backups
* Retention policy: (eg. keep x snapshots on the source and y snapshots in the destination cluster)
* Rewrites VM configurations so they match the new VMID and/or poolname on the destination
## Protected / unprotected snapshot
!TBD!
You can protect Ceph Snapshots with the corresponding Ceph/RBD flag, to avoid accidental deletion
and thus damaging your chain (see the sketch after the feature list below). Keep in mind that Proxmox
won't let you delete VMs then, because it's not aware of that parameter.
* Secure and encrypted transfer (SSH), so it's safe to mirror between datacenters without an additional VPN
* Near live-migrate: To move a VM from one Cluster to another, make an initial copy and re-run with --migrate. This will shutdown the VM on the source cluster and start it on the destination cluster.
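The snapshot protection mentioned above (the `--protect` switch) maps to the standard RBD flag; roughly, with placeholder names:
```
# Protect a mirror snapshot against removal
rbd snap protect data/vm-100-disk-0@mirror-20221101192144
# It has to be unprotected again before it can be deleted
rbd snap unprotect data/vm-100-disk-0@mirror-20221101192144
rbd snap rm data/vm-100-disk-0@mirror-20221101192144
```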
## Installation of prerequisites
```
apt install git pv gawk jq
```
## Install the Script somewhere, e.g. to /opt
@@ -83,19 +98,128 @@ git clone https://github.com/lephisto/crossover/ /opt
```
## Usage
Ensure that you can freely ssh from the Node you plan to mirror _from_ to _all_ nodes in the destination cluster, as well as localhost.
Mirror VM to another Cluster:
## Continuous replication between Clusters
Example 1: Mirror VM to another Cluster:
```
root@pve01:~/crossover# ./crossover mirror --vmid=100:10100 --destination=pve04 --pool=data2 --keeplocal=4 --keepremote=8 --overwrite --keep-dlock --online
root@pve01:~/crossover# ./crossover mirror --vmid=all --prefixid=99 --excludevmids=101 --destination=pve04 --pool=data2 --overwrite --online
ACTION: Onlinemirror
Start mirror 2022-11-01 19:21:44
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 99100 on Destination Host pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Transmitting Config for to destination pve04 VMID 99100
VM 100 - locked 100 [rc:0]
VM 99100 - locked 99100 [rc:0]
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20221101192144
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20221101192144
VM 100 - unlocked source VM 100 [rc:0]
VM 100 - I data/vm-100-disk-0@mirror-20221101192144: e:0:00:01 c:[ 227KiB/s] a:[ 227KiB/s] 372KiB
VM 100 - Housekeeping: localhost data/vm-100-disk-0, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20221101192032 (106s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-99100-disk-0-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-99100-disk-0-data@mirror-20221101192032 (108s) [rc:0]
VM 100 - Disk Summary: Took 2 Seconds to transfer 372.89 KiB in a incremental run
VM 100 - I data/vm-100-disk-1@mirror-20221101192144: e:0:00:00 c:[ 346 B/s] a:[ 346 B/s] 74.0 B
VM 100 - Housekeeping: localhost data/vm-100-disk-1, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20221101192032 (114s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-99100-disk-1-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-99100-disk-1-data@mirror-20221101192032 (115s) [rc:0]
VM 100 - Disk Summary: Took 1 Seconds to transfer 372.96 KiB in a incremental run
VM 99100 - Unlocking destination VM 99100
Finnished mirror 2022-11-01 19:22:30
Job Summary: Bytes transferd 2 bytes for 2 Disks on 1 VMs in 00 hours 00 minutes 46 seconds
VM Freeze OK/failed...: 1/0
RBD Snapshot OK/failed: 2/0
Full xmitted..........: 0 byte
Differential Bytes ...: 372.96 KiB
```
This example creates a mirror of VM 100 (in the source cluster) as VM 10100 (in the destination cluster) using the ceph pool "data2" for storing all attached disks. It will keep 4 Ceph snapshots prior to the latest (5 in total) and 8 snapshots on the remote cluster. It will keep the VM on the target Cluster locked to avoid an accidental start (thus causing split brain issues), and will do so even if the source VM is running.
The use case is that you might want to keep a cold-standby copy of a certain VM on another Cluster. If you need to start it on the target cluster you just have to unlock it with `qm unlock VMID` there.
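For the cold-standby use case you would typically re-run the mirror job on a schedule, e.g. from cron. A hedged example (the path, schedule and retention values are assumptions, not defaults):
```
# /etc/cron.d/crossover-mirror -- nightly mirror of VM 100 to pve04,
# keeping 2 days of snapshots locally and 7 days on the remote cluster
30 2 * * * root cd /opt && ./crossover mirror --jobname=nightly --vmid=100:10100 --destination=pve04 --pool=data2 --keeplocal=2d --keepremote=7d --online --overwrite --keep-dlock --syslog
```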
Another use case could be that you want to migrate a VM from one cluster to another with the least downtime possible. Real live migration as you are used to inside one cluster is hard to achieve cross-cluster, but you can easily make an initial migration while the VM is still running on the source cluster (fully transferring the block devices), shut it down on source, run the mirror process again (which is much faster now because it only needs to transfer the diff since the initial snapshot) and start it up on the target cluster. This way the migration basically takes one boot plus a few seconds for transferring the incremental snapshot.
## Near-live Migration
To minimize downtime and achieve a near-live Migration from one Cluster to another it's recommended to do an initial Sync of a VM from the source to the destination cluster. After that, run the job again, and add the --migrate switch. This causes the source VM to be shut down prior to snapshot + transfer, and be restarted on the destination cluster as soon as the incremental transfer is complete. Using --migrate will always try to start the VM on the destination cluster.
Example 2: Near-live migrate VM from one cluster to another (Run initial replication first, which works online, then run with --migrate to shut down on source, incrementally copy and start on destination):
```
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online
ACTION: Onlinemirror
Start mirror 2023-04-26 15:02:24
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Transmitting Config for to destination pve04 VMID 100
VM 100 - locked 100 [rc:0] on source
VM 100 - locked 100 [rc:0] on destination
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426150224
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426150224
VM 100 - unlocked source VM 100 [rc:0]
VM 100 - F data/vm-100-disk-0@mirror-20230426150224: e:0:09:20 r: c:[36.6MiB/s] a:[36.6MiB/s] 20.0GiB [===============================>] 100%
VM 100 - created snapshot on 100 [rc:0]
VM 100 - Disk Summary: Took 560 Seconds to transfer 20.00 GiB in a full run
VM 100 - F data/vm-100-disk-1@mirror-20230426150224: e:0:00:40 r: c:[50.7MiB/s] a:[50.7MiB/s] 2.00GiB [===============================>] 100%
VM 100 - created snapshot on 100 [rc:0]
VM 100 - Disk Summary: Took 40 Seconds to transfer 22.00 GiB in a full run
VM 100 - Unlocking destination VM 100
Finnished mirror 2023-04-26 15:13:47
Job Summary: Bytes transferred 22.00 GiB for 2 Disks on 1 VMs in 00 hours 11 minutes 23 seconds
VM Freeze OK/failed.......: 1/0
RBD Snapshot OK/failed....: 2/0
RBD export-full OK/failed.: 2/0
RBD export-diff OK/failed.: 0/0
Full xmitted..............: 22.00 GiB
Differential Bytes .......: 0 Bytes
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online --migrate
ACTION: Onlinemirror
Start mirror 2023-04-26 15:22:35
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Migration requested, shutting down VM on pve01
VM 100 - locked 100 [rc:0] on source
VM 100 - locked 100 [rc:0] on destination
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426152235
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426152235
VM 100 - I data/vm-100-disk-0@mirror-20230426152235: e:0:00:03 c:[1.29MiB/s] a:[1.29MiB/s] 4.38MiB
VM 100 - Housekeeping: localhost data/vm-100-disk-0, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230323162532 (2930293s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426144911 (2076s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145632 (1637s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145859 (1492s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426150224 (1290s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-100-disk-0-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-0-data@mirror-20230426150224 (1293s) [rc:0]
VM 100 - Disk Summary: Took 4 Seconds to transfer 4.37 MiB in a incremental run
VM 100 - I data/vm-100-disk-1@mirror-20230426152235: e:0:00:00 c:[ 227 B/s] a:[ 227 B/s] 74.0 B
VM 100 - Housekeeping: localhost data/vm-100-disk-1, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230323162532 (2930315s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426144911 (2098s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145632 (1659s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145859 (1513s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426150224 (1310s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-100-disk-1-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-1-data@mirror-20230426150224 (1313s) [rc:0]
VM 100 - Disk Summary: Took 2 Seconds to transfer 4.37 MiB in a incremental run
VM 100 - Unlocking destination VM 100
VM 100 - Starting VM on pve01
Finnished mirror 2023-04-26 15:24:25
Job Summary: Bytes transferred 4.37 MiB for 2 Disks on 1 VMs in 00 hours 01 minutes 50 seconds
VM Freeze OK/failed.......: 0/0
RBD Snapshot OK/failed....: 2/0
RBD export-full OK/failed.: 0/0
RBD export-diff OK/failed.: 2/0
Full xmitted..............: 0 Bytes
Differential Bytes .......: 4.37 MiB
```
## Things to check
From the Proxmox VE hosts you want to back up, you need to be able to ssh passwordless to all other cluster hosts that may hold VMs or Containers. This goes for the source as well as for the destination Cluster.
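One hedged way to set this up with plain OpenSSH (host names are placeholders taken from the diagram above; adapt to your clusters):
```
# On the node you mirror from (e.g. pve01): create a key once and
# distribute it to every node of both clusters, plus localhost
ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_ed25519
for host in localhost pve01 pve02 pve03 pve04 pve05 pve06; do
  ssh-copy-id -i ~/.ssh/id_ed25519.pub root@$host
done
# Verify that no password or host-key prompt is left
ssh -o BatchMode=yes root@pve04 true && echo ok
```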

538
crossover

@@ -1,12 +1,24 @@
#!/bin/bash
#set -x
LC_ALL="en_US.UTF-8"
source rainbow.sh
# Predefine if you want
declare opt_influx_api_url=''
declare opt_influx_token=''
declare opt_influx_bucket=''
declare opt_influx_api_org=''
declare opt_influx_jobname=''
declare opt_influx_job_metrics='crossover_xmit'
declare opt_influx_summary_metrics='crossover_jobs'
# Cross Pool Migration and incremental replication Tool for Proxmox VMs using Ceph.
# Author: Bastian Mäuser <bma@netz.org>
declare -r VERSION=0.2
declare -r NAME=$(basename "$0")
name=$(basename "$0")
declare -r NAME=$name
declare -r VERSION=0.8
declare -r PROGNAME=${NAME%.*}
declare -r PVE_DIR="/etc/pve"
@@ -14,17 +26,42 @@ declare -r PVE_NODES="$PVE_DIR/nodes"
declare -r QEMU='qemu-server'
declare -r QEMU_CONF_CLUSTER="$PVE_NODES/*/$QEMU"
declare -r EXT_CONF='.conf'
declare -r PVFORMAT_FULL='e:%t r:%e c:%r a:%a %b %p'
declare -r PVFORMAT_SNAP='e:%t c:%r a:%a %b'
declare -r LOG_FILE=$(mktemp)
logfile=$(mktemp)
declare -r LOG_FILE=$logfile
declare -A -g pvnode
declare -A -g dstpvnode
declare -A -g svmids
declare -A -g dvmids
declare -g -i perf_freeze_ok=0
declare -g -i perf_freeze_failed=0
declare -g -i perf_ss_ok=0
declare -g -i perf_ss_failed=0
declare -g -i perf_ss_ok=0
declare -g -i perf_ss_failed=0
declare -g -i perf_full_ok=0
declare -g -i perf_full_failed=0
declare -g -i perf_diff_ok=0
declare -g -i perf_diff_failed=0
declare -g -i perf_bytes_full=0
declare -g -i perf_bytes_diff=0
declare -g -i perf_bytes_total=0
declare -g -i perf_vm_running=0
declare -g -i perf_vm_stopped=0
declare -g -i perf_snaps_removed=0
declare -g -i perf_vm_total=0
declare -g -i perf_vm_ok=0
declare opt_destination
declare opt_vm_ids=''
declare opt_snapshot_prefix='mirror-'
declare opt_rewrite=''
declare -i opt_prefix_id
declare opt_exclude_vmids=''
declare -i opt_debug=0
declare -i opt_dry_run=0
declare -i opt_syslog=0
@@ -33,11 +70,16 @@ declare -i opt_keepslock=0
declare -i opt_keepdlock=0
declare -i opt_overwrite=0
declare -i opt_online=0
declare -i opt_keep_local=0
declare -i opt_keep_remote=0
declare -i opt_migrate=0
declare opt_keep_local='0s'
declare opt_keep_remote='0s'
declare -r redstconf='^\/etc\/pve\/nodes\/(.*)\/qemu-server\/([0-9]+).conf$'
declare -r recephimg='([a-zA-Z0-9]+)\:(.*)'
declare -r restripsnapshots='/^$/,$d'
declare -r redateex='^([0-9]{4})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})$'
declare -r restripansicolor='s/\x1b\[[0-9;]*m//g'
function usage(){
shift
@@ -47,7 +89,7 @@ function usage(){
_____
| |___ ___ ___ ___ ___ _ _ ___ ___
| --| _| . |_ -|_ -| . | | | -_| _|
|_____|_| |___|___|___|___|\_/|___|_|
|_____|_| |___|___|___|___|\_/|___|_| v$VERSION
EOF
fi
@@ -67,22 +109,32 @@ Commands:
mirror Replicate a stopped VM to another Cluster (full clone)
Options:
--vmid The source+target ID of the VM/CT, comma separated (eg. --vmid=100:100,101:101),
--destination 'Target PVE Host in target pool. e.g. --destination=pve04
--pool 'Ceph pool name in target pool. e.g. --pool=data
--keeplocal 'How many additional Snapshots to keep locally. e.g. --keeplocal=2
--keepremote 'How many additional Snapshots to keep remote. e.g. --keepremote=2
--online 'Allow online Copy
--nolock 'Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock 'Keep source VM locked on Transfer
--keep-dlock 'Keep VM locked after transfer on Destination
--overwrite 'Overwrite Destination
--debug 'Show Debug Output
--vmid The source+target ID of the VM/CT, comma separated (eg. --vmid=100:100,101:101), or all for all
--prefixid Prefix for VMID's on target System [optional]
--excludevmids Exclude VM IDs when using --vmid=all
--destination Target PVE Host in target pool. e.g. --destination=pve04
--pool Ceph pool name in target pool. e.g. --pool=data
--keeplocal How many additional Snapshots to keep locally. e.g. --keeplocal=2d
--keepremote How many additional Snapshots to keep remote. e.g. --keepremote=7d
--rewrite PCRE Regex to rewrite the Config Files (eg. --rewrite='s/(net0:)(.*)tag=([0-9]+)/\1\2tag=1/g' would
change the VLAN tag on net0 to 1, e.g. from 5 to 1).
--influxurl Influx API url (e.g. --influxurl=https://your-influxserver.com/api/)
--influxtoken Influx API token with write permission
--influxbucket Influx Bucket to write to (e.g. --influxbucket=telegraf/autogen)
--jobname Descriptive name for the job, used in Statistics
--mail Mail address to send report to, comma-separated (e.g. --mail=admin@test.com,admin2@test.com)
Switches:
--online Allow online Copy
--migrate Stop VM on Source Cluster before final Transfer and start on destination Cluster
--nolock Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock Keep source VM locked on Transfer
--keep-dlock Keep VM locked after transfer on Destination
--overwrite Overwrite Destination
--debug Show Debug Output
Report bugs to <mephisto@mephis.to>
EOF
exit 1
}
function parse_opts(){
@@ -92,7 +144,7 @@ function parse_opts(){
local args
args=$(getopt \
--options '' \
--longoptions=vmid:,destination:,pool:,keeplocal:,keepremote:,online,nolock,keep-slock,keep-dlock,overwrite,dry-run,debug \
--longoptions=vmid:,prefixid:,excludevmids:,destination:,pool:,keeplocal:,keepremote:,rewrite:,influxurl:,influxorg:,influxtoken:,influxbucket:,jobname:,mail:,online,migrate,nolock,keep-slock,keep-dlock,overwrite,dry-run,debug,syslog \
--name "$PROGNAME" \
-- "$@") \
|| end_process 128
@@ -102,17 +154,28 @@ function parse_opts(){
while true; do
case "$1" in
--vmid) opt_vm_ids=$2; shift 2;;
--prefixid) opt_prefix_id=$2; shift 2;;
--excludevmids) opt_exclude_vmids=$2; shift 2;;
--destination) opt_destination=$2; shift 2;;
--pool) opt_pool=$2; shift 2;;
--keeplocal) opt_keep_local=$2; shift 2;;
--keepremote) opt_keep_remote=$2; shift 2;;
--online) opt_online=1; shift 2;;
--rewrite) opt_rewrite=$2; shift 2;;
--influxurl) opt_influx_api_url=$2; shift 2;;
--influxorg) opt_influx_api_org=$2; shift 2;;
--influxtoken) opt_influx_token=$2; shift 2;;
--influxbucket) opt_influx_bucket=$2; shift 2;;
--jobname) opt_influx_jobname=$2; shift 2;;
--mail) opt_addr_mail="$2"; shift 2;;
--online) opt_online=1; shift ;;
--migrate) opt_migrate=1; shift ;;
--dry-run) opt_dry_run=1; shift;;
--debug) opt_debug=1; shift;;
--nolock) opt_lock=0; shift;;
--keep-slock) opt_keepslock=1; shift;;
--keep-dlock) opt_keepdlock=1; shift;;
--overwrite) opt_overwrite=1; shift;;
--syslog) opt_syslog=1; shift;;
--) shift; break;;
*) break;;
esac
@@ -121,18 +184,76 @@ function parse_opts(){
if [ $opt_debug -eq 1 ]; then
log info "============================================"
log info "Proxmox Crosspool Migration: $VERSION";
log info "pid: $(cat /var/run/"$PROGNAME".pid)"
log info "============================================"
log info "Proxmox VE Version:"
pveversion
echowhite "$(pveversion)"
log info "============================================"
fi
[ -z "$opt_vm_ids" ] && { log info "VM id is not set."; end_process 1; }
vm_ids=$(echo "$opt_vm_ids" | tr ',' "\n")
[ -z "$opt_influx_jobname" ] && { log info "Jobname is not set."; end_process 1; }
if [ -n "$opt_keep_local" ]; then
if ! [[ ${opt_keep_local:(-1)} == "s" || ${opt_keep_local:(-1)} == "d" ]]; then
echo "--keeplocal: Parameter malformed. suffix s or d missing"
end_process 255
fi
fi
if [ -n "$opt_keep_remote" ]; then
if ! [[ ${opt_keep_remote:(-1)} == "s" || ${opt_keep_remote:(-1)} == "d" ]]; then
echo "--keepremote: Parameter malformed. suffix s or d missing"
end_process 255
fi
fi
if [ $opt_keepdlock -eq 1 ] && [ $opt_migrate -eq 1 ]; then
log error "--keepdlock/--migrate: Invalid parameter Combination: you can't keep the destination locked in near-live migration mode"
end_process 255
fi
if [ "$opt_vm_ids" = "all" ]; then
local all=''
local data=''
local cnt=''
local ids=''
all=$(get_vm_ids "$QEMU_CONF_CLUSTER/*$EXT_CONF" "$LXC_CONF_CLUSTER/*$EXT_CONF")
all=$(echo "$all" | tr ',' "\n")
opt_exclude_vmids=$(echo "$opt_exclude_vmids" | tr ',' "\n")
for id in $all; do
cnt=$(echo $opt_exclude_vmids | grep -o $id|wc -w)
if [ $cnt == 0 ]; then
vm_ids=$(echo "$vm_ids$id:$opt_prefix_id$id,")
fi
done
vm_ids=$(echo "$vm_ids" | tr ',' "\n")
else
if [ ! -z $opt_prefix_id ]; then
ids=$(echo "$opt_vm_ids" | tr ',' "\n")
for id in $ids; do
vm_ids=$(echo "$vm_ids$id:$opt_prefix_id$id,")
done
vm_ids=$(echo "$vm_ids" | tr ',' "\n")
else
vm_ids=$(echo "$opt_vm_ids" | tr ',' "\n")
fi
fi
log debug "vm_ids: $vm_ids"
}
human_readable() {
b=${1:-0}; d=''; s=0; S=(Bytes {K,M,G,T,P,E,Z,Y}iB)
while ((b > 1024)); do
d="$(printf ".%02d" $((b % 1024 * 100 / 1024)))"
b=$((b / 1024))
let s++
done
echo "$b$d ${S[$s]}"
}
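# Example (illustrative): human_readable 2147483648 -> "2.00 GiB"; human_readable 382 -> "382 Bytes"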
function map_vmids_to_host(){
@@ -163,6 +284,13 @@ function exist_file(){
done
}
function lookupcephpool() {
pvehost=$1
pvepoolname=$2
res=$(ssh $pvehost cat /etc/pve/storage.cfg | sed -n "/rbd: $pvepoolname/,/^$/p" | grep pool | cut -d " " -f 2)
echo $res
}
function get_vm_ids(){
local data=''
local conf=''
@@ -218,6 +346,7 @@ function log(){
local level=$1
shift 1
local message=$*
local syslog_msg=''
case $level in
debug)
@@ -229,26 +358,32 @@ function log(){
info)
echo -e "$message";
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$message"
echo -e "$message" | sed -e 's/\x1b\[[0-9;]*m//g' >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$syslog_msg"
;;
warn)
echo "WARNING: $message" 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.warn "$message"
echo -n "$(echoyellow 'WARNING: ')"
echowhite "$message" 1>&2
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.warn "$syslog_msg"
;;
error)
echo "ERROR: $message" 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.err "$message"
echo -n "$(echored 'ERROR: ')"
echowhite "$message" 1>&2
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.err "$syslog_msg"
;;
*)
echo "$message" 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$message"
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$syslog_msg"
;;
esac
}
@@ -267,9 +402,28 @@ function mirror() {
parse_opts "$@"
local timestamp; timestamp=$(date +%Y%m%d%H%M%S)
local xmittype
local humantime
local vmname
local -i xmitrc
local -i ssrc
local -i freezerc
local -i unfreezerc
local -i startdisk
local -i enddisk
local -i startjob
local -i endjob
local -i vmcount=0
local -i diskcount=0
log info "ACTION: Onlinemirror"
local disp_perf_freeze_failed
local disp_perf_ss_failed
local disp_perf_full_failed
local disp_perf_diff_failed
log info "ACTION: $(echowhite Onlinemirror)"
log info "Start mirror $(date "+%F %T")"
startjob=$(date +%s)
#create pid file
local pid_file="/var/run/$PROGNAME.pid"
@@ -289,25 +443,31 @@ function mirror() {
map_vmids_to_host
map_vmids_to_dsthost "$opt_destination"
if [ "$(check_pool_exist "$opt_pool")" -eq 0 ]; then
log error "Preflight check: Destination RBD-Pool $opt_pool does not exist."
end_process 255
fi
for vm_id in $svmids; do
(( vmcount++ ))
local file_config; file_config=$(get_config_file)
[ -z "$file_config" ] && continue
local disk=''
dvmid=${dvmids[$vm_id]}
srcvmgenid=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf|grep vmgenid|sed -r -e 's/^vmgenid:\s(.*)/\1/')
dstvmgenid=$(ssh $opt_destination cat $PVE_NODES/"${dstpvnode[$dvmid]}"/$QEMU/"$dvmid".conf 2>/dev/null|grep vmgenid|sed -r -e 's/^vmgenid:\s(.*)/\1/')
log debug "Checking for VM $dvmid on Destination Host $opt_destination $QEMU_CONF_CLUSTER"
log debug "DVMID: $dvmid"
conf_on_destination=$(ssh $opt_destination "ls -d $QEMU_CONF_CLUSTER/$dvmid$EXT_CONF 2>/dev/null")
vmname=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf | sed -e ''$restripsnapshots'' | grep "name\:" | cut -d' ' -f 2)
log info "VM $vm_id - Starting mirror for $(echowhite "$vmname")"
srcvmgenid=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf | sed -e ''$restripsnapshots'' | grep vmgenid | sed -r -e 's/^vmgenid:\s(.*)/\1/')
dstvmgenid=$(ssh "$opt_destination" cat $PVE_NODES/"${dstpvnode[$dvmid]}"/$QEMU/"$dvmid".conf 2>/dev/null | grep vmgenid | sed -e ''$restripsnapshots'' | sed -r -e 's/^vmgenid:\s(.*)/\1/')
log info "VM $vm_id - Checking for VM $dvmid on destination cluster $opt_destination $QEMU_CONF_CLUSTER"
log debug "DVMID:$dvmid srcvmgenid:$srcvmgenid dstvmgenid:$dstvmgenid"
conf_on_destination=$(ssh "$opt_destination" "ls -d $QEMU_CONF_CLUSTER/$dvmid$EXT_CONF 2>/dev/null")
[[ "$conf_on_destination" =~ $redstconf ]]
host_on_destination=${BASH_REMATCH[1]}
if [ $host_on_destination ]; then
dststatus=$(ssh root@${dstpvnode[$dvmid]} qm status $dvmid|cut -d' ' -f 2)
if [ $dststatus == "running" ]; then
log error "Destination VM is running. bailing out"
log error "VM is running on Destination. bailing out"
end_process 255
fi
fi
@@ -323,96 +483,226 @@ function mirror() {
log error "Source VM genid ($srcvmgenid) doesn't match destination VM genid ($dstvmgenid). This should not happen. Bailing out.."
end_process 255
fi
log info "Transmitting Config for VM $vm_id to desination $dvmid"
log info "VM $vm_id - Transmitting Config for to destination $opt_destination VMID $dvmid"
rewriteconfig $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf $opt_destination "$opt_pool" $PVE_NODES/"$opt_destination"/$QEMU/"$dvmid".conf "$dvmid"
map_vmids_to_dsthost "$opt_destination"
fi
#--move so we need to shutdown and remove from ha group?
if [ $opt_migrate -eq 1 ]; then
log info "VM $vm_id - Migration requested, shutting down VM on ${pvnode[$vm_id]}"
if [ "$(get_ha_status "$vm_id")" == "started" ]; then
log info "VM $vm_id - remove from HA"
do_run "ha-manager remove $vm_id"
fi
do_run "ssh root@${pvnode[$vm_id]} qm shutdown $vm_id >/dev/null"
fi
#Lock on source + destination
if [ $opt_lock -eq 1 ]; then
ssh root@"${pvnode[$vm_id]}" qm set "$vm_id" --lock backup
ssh root@"${dstpvnode[$dvmid]}" qm set "$dvmid" --lock backup
do_run "ssh root@""${pvnode[$vm_id]}"" qm set ""$vm_id"" --lock backup" >/dev/null
log info "VM $vm_id - locked $vm_id [rc:$?] on source"
do_run "ssh root@""${dstpvnode[$dvmid]}"" qm set ""$dvmid"" --lock backup" >/dev/null
log info "VM $dvmid - locked $dvmid [rc:$?] on destination"
fi
#Freeze fs only if no migration running
if [ $opt_migrate -eq 0 ]; then
vm_freeze "$vm_id" "${pvnode[$vm_id]}" >/dev/null
freezerc=$?
if [ $freezerc -gt 0 ]; then
log warn "VM $vm_id - QEMU-Guest could not fsfreeze on guest."
(( perf_freeze_failed++ ))
else
(( perf_freeze_ok++ ))
fi
fi
#Freeze, take Rbd Snapshot then unfreeze
vm_freeze "$vm_id" "${pvnode[$vm_id]}"
for disk in $(get_disks_from_config "$file_config"); do
src_image_spec=$(get_image_spec "$disk")
create_snapshot "$src_image_spec@$opt_snapshot_prefix$timestamp"
create_snapshot "$src_image_spec@$opt_snapshot_prefix$timestamp" 2>/dev/null
ssrc=$?
if [ $ssrc -gt 0 ]; then
log warn "VM $vm_id - rbd snap failed."
(( perf_ss_failed++ ))
else
(( perf_ss_ok++ ))
fi
done
vm_unfreeze "$vm_id" "${pvnode[$vm_id]}"
if [ $opt_migrate -eq 0 ]; then
vm_unfreeze "$vm_id" "${pvnode[$vm_id]}" >/dev/null
unfreezerc=$?
if [ $unfreezerc -gt 0 ]; then
log error "VM $vm_id - QEMU-Guest could not fsunfreeze on guest."
fi
if [ ! $opt_keepslock -eq 1 ]; then
do_run "ssh root@${pvnode[$vm_id]} qm unlock $vm_id" >/dev/null
log info "VM $vm_id - unlocked source VM $vm_id [rc:$?]"
fi
fi
for disk in $(get_disks_from_config "$file_config"); do
(( diskcount++ ))
log debug "VMID: $vm_id Disk: $disk DESTVMID: $dvmid"
src_image_spec=$(get_image_spec "$disk")
[ -z "$src_image_spec" ] && continue
dst_image_spec=$(echo $src_image_spec | sed -r -e "s/([a-zA-Z0-9]+\/[a-zA-Z0-9]+\-)([0-9]+)(\-[a-zA-Z0-9]+\-[0-9]+)/\1$dvmid\3/")
dst_image_spec=$(echo $src_image_spec | sed -r -e "s/(.*\/[a-zA-Z0-9]+\-)([0-9]+)(\-[a-zA-Z0-9]+\-[0-9]+)/\1$dvmid\3/")
[ -z "$dst_image_spec" ] && continue
[[ $disk =~ $recephimg ]]
src_image_pool=${BASH_REMATCH[1]}
src_image_pool_pve=${BASH_REMATCH[1]}
src_image_pool=$(lookupcephpool "localhost" ${BASH_REMATCH[1]})
src_image_name=${BASH_REMATCH[2]}
[[ $dst_image_spec =~ ^[a-zA-Z0-9]+\/(.*)$ ]]
dst_image_name=${BASH_REMATCH[1]}
[[ $dst_image_spec =~ ^.*\/(.*)$ ]]
dst_image_name=${BASH_REMATCH[1]}-$src_image_pool_pve
dst_image_pool=$(lookupcephpool $opt_destination $opt_pool)
snapshot_name="@$opt_snapshot_prefix$timestamp"
localsnapcount=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1|wc -l)
if [ $localsnapcount -ge 2 ]; then
# we have at least 2 local snapshots, to we can make an incremental copy
currentlocal=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1|tail -n 1)
localts=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1 | sed -r -e 's/.*@mirror-(.*)/\1/')
localts=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1 | sed -r -e 's/.*@'$opt_snapshot_prefix'(.*)/\1/')
fi
latestremote=$(ssh $opt_destination rbd ls -l $opt_pool | grep $dst_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1|tail -n 1)
if [ $latestremote ]; then
latestremote=$(ssh $opt_destination rbd ls -l $dst_image_pool | grep $dst_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1|tail -n 1)
if [ "$latestremote" ]; then
[[ $latestremote =~ ^.*@$opt_snapshot_prefix([0-9]+)$ ]]
latestremotets=${BASH_REMATCH[1]}
for ts in $localts; do
if [ $ts == $latestremotets ]; then
if [ "$ts" == "$latestremotets" ]; then
basets=$ts
fi
done
fi
if [ -z $basets ]; then
log debug "No matching Snapshot found on destination - Full Copy $src_image_pool/$src_image_name$snapshot_name to $opt_pool/$dst_image_name"
xmitjob="rbd export --rbd-concurrent-management-ops 8 $src_image_pool/$src_image_name$snapshot_name --no-progress -|pv -r|ssh $opt_destination rbd import --image-format 2 - $opt_pool/$dst_image_name"
if [ -z "$basets" ]; then
xmittype='full'
log debug "No matching Snapshot found on destination - Full Copy $src_image_pool/$src_image_name$snapshot_name to $dst_image_pool/$dst_image_name"
#snapts=$(echo $currentlocal | sed -r -e 's/.*@mirror-(.*)/\1/')
snapshotsize=$(rbd du --pretty-format --format json $src_image_pool/$src_image_name|jq '.images[] | select (.snapshot_id == null) | {provisioned_size}.provisioned_size'|tail -1)
log debug "snapsize: $snapshotsize "
xmitjob="rbd export --rbd-concurrent-management-ops 8 $src_image_pool/$src_image_name$snapshot_name --no-progress - | tee >({ wc -c; } >/tmp/$PROGNAME.$pid.$dst_image_pool-$dst_image_name.size) | pv -s $snapshotsize -F \"VM $vm_id - F $src_image_pool/$src_image_name$snapshot_name: $PVFORMAT_FULL\" | ssh $opt_destination rbd import --image-format 2 - $dst_image_pool/$dst_image_name 2>/dev/null"
# create initial snapshot on destination
if ! do_run $xmitjob; then
log debug "xmitjob: $xmitjob"
startdisk=$(date +%s)
do_run "$xmitjob"
enddisk=$(date +%s)
xmitrc=$?
if [ ! $xmitrc ]; then
log error "Transmitting Image failed"
(( perf_full_failed++ ))
return 1
else
(( perf_full_ok++ ))
fi
cmd="ssh $opt_destination rbd snap create $opt_pool/$dst_image_name$snapshot_name"
do_run $cmd
cmd="ssh $opt_destination rbd snap create $dst_image_pool/$dst_image_name$snapshot_name"
do_run "$cmd" 2>/dev/null
log info "VM $vm_id - created snapshot on $dvmid [rc:$?]"
perf_bytes_full=$(( perf_bytes_full + $(cat /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size) ))
else
xmittype='incremental'
log debug "Basecopy + snapshot on destination - let's just transfer the diff"
xmitjob="rbd export-diff --from-snap $opt_snapshot_prefix$basets $src_image_pool/$currentlocal - | ssh $opt_destination rbd import-diff - $opt_pool/$dst_image_name"
if ! do_run $xmitjob; then
log error "Transmitting Image failed"
return 1
log debug "sizer: rbd diff $src_image_pool/$currentlocal --from-snap $opt_snapshot_prefix$basets|gawk --bignum '{ SUM += \$2 } END { print SUM }'"
snapshotsize=$(rbd diff $src_image_pool/$currentlocal --from-snap $opt_snapshot_prefix$basets|gawk --bignum '{ SUM += $2 } END { print SUM }')
log debug "snapshotsize: $snapshotsize"
if [ -z "$snapshotsize" ]; then
#disk was not attached, or really nothing has changed..
snapshotsize=0
fi
do_housekeeping "localhost" "$src_image_pool" "$src_image_name" $opt_keep_local
do_housekeeping "$opt_destination" "$opt_pool" "$dst_image_name" $opt_keep_remote
xmitjob="rbd export-diff --no-progress --from-snap $opt_snapshot_prefix$basets $src_image_pool/$currentlocal - | tee >({ wc -c; } >/tmp/$PROGNAME.$pid.$dst_image_pool-$dst_image_name.size) | pv -F \"VM $vm_id - I $src_image_pool/$src_image_name$snapshot_name: $PVFORMAT_SNAP\" | ssh $opt_destination rbd import-diff --no-progress - $dst_image_pool/$dst_image_name"
log debug "xmitjob: $xmitjob"
startdisk=$(date +%s)
do_run "$xmitjob"
enddisk=$(date +%s)
xmitrc=$?
if [ ! $xmitrc ]; then
log error "Transmitting Image failed"
(( perf_diff_failed++ ))
return 1
else
(( perf_diff_ok++ ))
fi
do_housekeeping "localhost" "$src_image_pool" "$src_image_name" "$opt_keep_local" "$vm_id"
do_housekeeping "$opt_destination" "$dst_image_pool" "$dst_image_name" "$opt_keep_remote" "$vm_id"
perf_bytes_diff=$(( perf_bytes_diff + $(cat /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size) ))
fi
perf_bytes_total=$(( perf_bytes_total + $(cat /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size) ))
rm /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size
log info "VM $vm_id - Disk Summary: Took $(( enddisk - startdisk )) Seconds to transfer $(human_readable "$perf_bytes_total" 2) in a $xmittype run"
if [ -n "$opt_influx_api_url" ]; then
log info "VM $vm_id - Logging to InfluxDB: $opt_influx_api_url"
influxlp="$opt_influx_job_metrics,vmname=$vmname,jobname=$opt_influx_jobname,destination=$opt_destination,srcimage=$src_image_name,dstimage=$dst_image_name,xmittype=$xmittype bytescalculated=$snapshotsize""i,bytesonwire=$perf_bytes_total""i,xmitrc=$xmitrc""i,freezerc=$freezerc""i,unfreezerc=$unfreezerc""i,basets=$basets""i"
log debug "InfluxLP: --->\n $influxlp"
cmd="curl --request POST \"$opt_influx_api_url/v2/write?org=$opt_influx_api_org&bucket=$opt_influx_bucket&precision=ns\" --header \"Authorization: Token $opt_influx_token\" --header \"Content-Type: text/plain; charset=utf-8\" --header \"Accept: application/json\" --data-binary '$influxlp'"
do_run "$cmd"
fi
unset basets
done
if [ ! $opt_keepslock -eq 1 ]; then
ssh root@${pvnode[$vm_id]} qm unlock $vm_id
log info "Unlocking source VM $vm_id"
fi
if [ $opt_keepdlock -eq 0 ]; then
ssh root@${dstpvnode[$dvmid]} qm unlock $dvmid
log info "Unlocking destination VM $dvmid"
log info "VM $dvmid - Unlocking destination VM $dvmid"
fi
#--migrate so start on destination?
if [ $opt_migrate -eq 1 ]; then
log info "VM $dvmid - Starting VM on ${pvnode[$vm_id]}"
do_run "ssh root@""${dstpvnode[$dvmid]}"" qm start "$dvmid >/dev/null
fi
done
endjob=$(date +%s)
log info "Finnished mirror $(date "+%F %T")"
humantime=$(date -ud "@$((endjob-startjob))" +'%H hours %M minutes %S seconds')
log info "Job Summary: Bytes transferred $(human_readable $perf_bytes_total) for $diskcount Disks on $vmcount VMs in $humantime"
if [ "$perf_freeze_failed" -gt 0 ]; then disp_perf_freeze_failed="$(echored $perf_freeze_failed)"; else disp_perf_freeze_failed="$(echogreen $perf_freeze_failed)"; fi
if [ "$perf_ss_failed" -gt 0 ]; then disp_perf_ss_failed="$(echored $perf_ss_failed)"; else disp_perf_ss_failed="$(echogreen $perf_ss_failed)"; fi
if [ "$perf_full_failed" -gt 0 ]; then disp_perf_full_failed="$(echored $perf_full_failed)"; else disp_perf_full_failed="$(echogreen $perf_full_failed)"; fi
if [ "$perf_diff_failed" -gt 0 ]; then disp_perf_diff_failed="$(echored $perf_diff_failed)"; else disp_perf_diff_failed="$(echogreen $perf_diff_failed)"; fi
log info "VM Freeze OK/failed.......: $perf_freeze_ok/$disp_perf_freeze_failed"
log info "RBD Snapshot OK/failed....: $perf_ss_ok/$disp_perf_ss_failed"
log info "RBD export-full OK/failed.: $perf_full_ok/$disp_perf_full_failed"
log info "RBD export-diff OK/failed.: $perf_diff_ok/$disp_perf_diff_failed"
log info "Full xmitted..............: $(human_readable $perf_bytes_full)"
log info "Differential Bytes .......: $(human_readable $perf_bytes_diff)"
if [ -n "$opt_influx_api_url" ]; then
log info "VM $vm_id - Logging Job summary to InfluxDB: $opt_influx_api_url"
influxlp="$opt_influx_summary_metrics,jobname=$opt_influx_jobname perf_bytes_diff=$perf_bytes_diff""i,perf_bytes_full=$perf_bytes_full""i,perf_bytes_total=$perf_bytes_total""i,perf_diff_failed=$perf_diff_failed""i,perf_diff_ok=$perf_diff_ok""i,perf_freeze_failed=$perf_freeze_failed""i,perf_freeze_ok=$perf_freeze_ok""i,perf_full_failed=$perf_full_failed""i,perf_full_ok=$perf_full_ok""i,perf_ss_failed=$perf_ss_failed""i,perf_ss_ok=$perf_ss_ok""i,perf_vm_running=$perf_vm_running""i,perf_vm_stopped=$perf_vm_stopped""i"
log debug "InfluxLP: --->\n $influxlp"
cmd="curl --request POST \"$opt_influx_api_url/v2/write?org=$opt_influx_api_org&bucket=$opt_influx_bucket&precision=ns\" --header \"Authorization: Token $opt_influx_token\" --header \"Content-Type: text/plain; charset=utf-8\" --header \"Accept: application/json\" --data-binary '$influxlp'"
do_run "$cmd"
fi
(( perf_vm_ok++ ))
end_process 0
}
function do_housekeeping(){
horst=$1
rbdpool=$2
rbdimage=$3
keep=$4
snapshotstokill=$(ssh $horst rbd ls -l $rbdpool | grep $rbdimage@$opt_snapshot_prefix | cut -d ' ' -f 1|head -n -1 |head -n -$keep)
log info "Houskeeping $horst $rbdpool $rbdimage, keeping previous $keep Snapshots"
for snap in $snapshotstokill; do
cmd="ssh $horst rbd snap rm $rbdpool/$snap"
if ! do_run $cmd; then
log error "Housekeeping failed: $cmd"
return 1
local horst=$1
local rbdpool=$2
local rbdimage=$3
local keep=$4
local vm=$5
local snap
local -i keeptime
local -i ts
local -i snapepoch
local -i age
log info "VM $vm - Housekeeping: $horst $rbdpool/$rbdimage, keeping Snapshots for $keep"
cmd="ssh $horst rbd ls -l $rbdpool | grep $rbdimage@$opt_snapshot_prefix | cut -d ' ' -f 1|head -n -1"
snapshots=$($cmd)
if [ "${keep:(-1)}" == "d" ]; then
keep=${keep%?}
keeptime=$(( $keep * 86400 ))
elif [ "${keep:(-1)}" == "s" ]; then
keep=${keep%?}
keeptime=$keep
fi
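# e.g. --keeplocal=2d yields keeptime=172800 seconds; every mirror-YYYYMMDDhhmmss snapshot
# older than keeptime (age derived from its name below) gets removed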
for snap in $snapshots; do
[[ $snap =~ ^.*@$opt_snapshot_prefix([0-9]+)$ ]]
ts=${BASH_REMATCH[1]}
[[ $ts =~ $redateex ]]
snapepoch=$(date --date "${BASH_REMATCH[1]}/${BASH_REMATCH[2]}/${BASH_REMATCH[3]} ${BASH_REMATCH[4]}:${BASH_REMATCH[5]}:${BASH_REMATCH[6]}" +%s)
age=$(($(date -u +"%s")-$snapepoch ))
if [ $age -gt "$keeptime" ]; then
cmd="ssh $horst rbd snap rm $rbdpool/$snap"
do_run "$cmd" 2>/dev/null
log info "VM $vm_id - Removing Snapshot $horst $rbdpool/$snap ($age""s) [rc:$?]"
if [ $rc -eq 0 ]; then
(( perf_snaps_removed++ ))
fi
fi
done
}
@@ -420,30 +710,35 @@ function do_housekeeping(){
function create_snapshot(){
local snap="$1"
log info "VM $vm_id - Creating snapshot $snap"
if ! do_run "rbd snap create $snap"; then
return 1;
fi
do_run "rbd snap create $snap"
rc=$?
log debug "create_snapshot() return $rc"
return $rc
}
function vm_freeze() {
local fvm=$1;
local fhost=$2;
status=$(ssh root@$fhost qm status $fvm|cut -d' ' -f 2)
status=$(ssh root@"$fhost" qm status "$fvm"|cut -d' ' -f 2)
if ! [[ "$status" == "running" ]]; then
log info "VM $fvm - Not running, skipping fsfreeze-freeze"
return
(( perf_vm_stopped++ ))
else
(( perf_vm_running++ ))
fi
local cmd="ssh root@$fhost /usr/sbin/qm guest cmd $fvm fsfreeze-freeze"
log info "VM $fvm - Issuing fsfreeze-freeze to $fvm on $fhost"
do_run "$cmd"
rc=$?
log debug "vm_freeze() return $rc"
return $rc
}
function vm_unfreeze() {
local fvm=$1;
local fhost=$2;
status=$(ssh root@$fhost qm status $fvm|cut -d' ' -f 2)
status=$(ssh root@"$fhost" qm status "$fvm"|cut -d' ' -f 2)
if ! [[ "$status" == "running" ]]; then
log info "VM $fvm - Not running, skipping fsfreeze-thaw"
return
@@ -453,6 +748,7 @@ function vm_unfreeze() {
do_run "$cmd"
rc=$?
log debug "vm_unfreeze() return $rc"
return $rc
}
function rewriteconfig(){
@@ -461,7 +757,13 @@ function rewriteconfig(){
local newpool=$3
local newconfig=$4
local newvmid=$5
cat "$oldconfig" | sed -r -e "s/^(virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+),(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6,\7/g" | ssh $dst "cat - >$newconfig"
local sedcmd
if [ ! -z "$opt_rewrite" ]; then
sedcmd='sed -r -e '$opt_rewrite
else
sedcmd='sed -e /^$/,$d'
fi
cat "$oldconfig" | sed -r -e "s/^(virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+).*,(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6-\3,\7/g" | $sedcmd | sed -e '/^$/,$d' | sed -e '/ide[0-9]:.*-cloudinit,media=cdrom.*/d' | grep -v "^parent:\s.*$" | ssh "$dst" "cat - >$newconfig"
}
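# Illustration of the disk-line rewrite above (values from the README examples; size is arbitrary):
# with newpool=data2 and newvmid=99100, "scsi0: data:vm-100-disk-0,size=20G" becomes
# "scsi0: data2:vm-99100-disk-0-data,size=20G": the disk moves to the destination pool, gets the
# destination VMID and is suffixed with the original PVE storage name, matching the image names
# used in the mirror loop.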
function checkvmid(){
@@ -469,7 +771,7 @@ function checkvmid(){
local vmid=$2
cmd="ssh $dst ls -l $QEMU_CONF_CLUSTER/$vmid.conf|wc -l"
rval=$($cmd)
echo $rval
echo "$rval"
}
function do_run(){
@@ -492,23 +794,25 @@ function do_run(){
function end_process(){
local -i rc=$1;
# if ! [[ -z "$startts" && -z "$endts" ]]; then
# local -i runtime=$(expr $endts - $startts)
# local -i bps=$(expr $bytecount/$runtime)
# fi
# local subject="Ceph [VM:$vmok/$vmtotal SS:$snapshotok/$snapshottotal EX:$exportok/$exporttotal] [$(bytesToHuman "$bytecount")@$(bytesToHuman "$bps")/s]"
# [ $rc != 0 ] && subject="$subject [ERROR]"
local -i runtime
local -i bps
local -i ss_total
local subject
if ! [[ -z "$startjob" && -z "$endjob" ]]; then
runtime=$(expr $endjob - $startjob)
bps=$(expr $perf_bytes_total/$runtime)
fi
ss_total=$(expr $perf_ss_ok + $perf_ss_failed)
subject="Crossover [VM:$perf_vm_ok/$vmcount SS:$perf_ss_ok/$ss_total]"
[ $rc != 0 ] && subject="[ERROR] $subject" || subject="[OK] $subject"
#send email
# local mail;
# local mailhead="Backup $imgcount Images in $vmcount VMs (Bytes: $bytecount)"
# for mail in $(echo "$opt_addr_mail" | tr "," "\n"); do
# do_run "cat '$LOG_FILE' | mail -s '$subject' '$mail'"
# done
local mail;
for mail in $(echo "$opt_addr_mail" | tr "," "\n"); do
do_run "cat '$LOG_FILE' | mail -s '$subject' '$mail'"
done
#remove log
# rm "$LOG_FILE"
rm "$LOG_FILE"
exit "$rc";
}
@@ -525,6 +829,24 @@ function get_image_spec(){
echo "$image_spec"
}
function get_ha_status() {
local havmid="$1"
ha_status=$(ha-manager status| grep vm:"$havmid" | cut -d " " -f 4| sed 's/.$//')
echo "$ha_status"
}
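# check_pool_exist: asks 'pvesm status' on the destination host and echoes 1 if an rbd storage
# named $1 is configured there, 0 otherwise -- used by the preflight check in mirror()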
function check_pool_exist() {
local poolname="$1"
local -i exists=255
pool_status=$(ssh $opt_destination pvesm status|grep rbd|cut -d " " -f 1|grep $poolname)
if [ "$pool_status" == "$poolname" ]; then
exists=1
else
exists=0
fi
echo $exists
}
function main(){
[ $# = 0 ] && usage;

51
rainbow.sh Normal file

@@ -0,0 +1,51 @@
# https://github.com/xr09/rainbow.sh
# Bash helper functions to put colors on your scripts
#
# Usage example:
# vargreen=$(echogreen "Grass is green")
# echo "Coming next: $vargreen"
#
__RAINBOWPALETTE="1"
function __colortext()
{
echo -e " \e[$__RAINBOWPALETTE;$2m$1\e[0m"
}
function echogreen()
{
echo $(__colortext "$1" "32")
}
function echored()
{
echo $(__colortext "$1" "31")
}
function echoblue()
{
echo $(__colortext "$1" "34")
}
function echopurple()
{
echo $(__colortext "$1" "35")
}
function echoyellow()
{
echo $(__colortext "$1" "33")
}
function echocyan()
{
echo $(__colortext "$1" "36")
}
function echowhite()
{
echo $(__colortext "$1" "37")
}