62 Commits

Author SHA1 Message Date
Bastian a11dd53d4d wip 2024-10-16 21:36:08 +02:00
Bastian e3419b45ce Merge remote-tracking branch 'origin/main' into ft-uniquenames 2024-10-16 21:30:54 +02:00
Bastian 587392e06d Add TPM Disk to migration 2024-10-15 12:53:00 +02:00
Bastian Mäuser 813c54760d WIP 2024-02-26 17:25:46 +01:00
Bastian Mäuser 5588b7342e Require Unique Disk names throughout all Storage pools. 2024-02-26 17:09:13 +01:00
Bastian Mäuser 2f985df07d Added option, do specify VMs to process using Proxmox UI Tags 2024-02-26 15:32:23 +01:00
Bastian Mäuser b7c86b0206 fix incremental ecpool 2024-02-23 12:08:50 +01:00
Bastian Mäuser 3d0babd12c Adjust Documentation 2024-02-23 11:43:00 +01:00
Bastian Mäuser a885c9fbf9 Added Option to select ssh cipher and set decent default for it 2024-02-23 11:38:11 +01:00
root 694396255d Add support for ceph erasure-coded pools, fix a bug, when a pool was called pool 2024-02-22 17:39:36 +01:00
Bastian 8cd0472cba Fix Documentation typo 2024-01-15 12:04:50 +01:00
Bastian Mäuser e0d1814c15 doc: Clarify on preflight checks 2023-08-09 10:56:51 +02:00
Bastian 8467bcd08e improvement: precise wording 2023-08-04 16:05:56 +02:00
Bastian 48eb3f840e fix: missing vm_id in log message 2023-08-04 15:59:07 +02:00
Bastian 514d19b9f6 added: retrieve ceph versions for compatibility checks 2023-08-04 15:54:37 +02:00
Bastian a6e1f9342a added: support for EFI Disks 2023-08-04 15:36:18 +02:00
Bastian 59b8ab5ce2 added: default pool, feature: confirm --migrate, add: --noconfirm 2023-08-04 13:38:26 +02:00
Bastian 4bfd79e79e improved: retrieve source/destination cluster name for better insights 2023-07-13 15:18:24 +02:00
Bastian 6e8eb7ce2b fixed: preflight checks 2023-07-13 14:45:55 +02:00
Bastian be88cb4d40 fixed: perf_vm_stopped++ never counted. 2023-07-13 13:54:20 +02:00
Bastian 1343dc6b51 fixed: Correct target host now displayed in log messsage, Add downtime counter 2023-07-13 13:51:58 +02:00
Bastian 5ce325beec Strip ansi color codes from syslog and mail 2023-06-13 16:29:06 +02:00
Bastian b8d2386e69 Added Logging by mail functionality, added --mail parameter, added logfilehandling 2023-06-13 16:13:23 +02:00
Bastian a5ea397d11 bump version 2023-06-13 14:33:20 +02:00
Bastian 36dabe9d79 fixed several linting issues 2023-06-13 14:21:06 +02:00
Bastian 284cfb6e76 Added features to README, minor wording changes 2023-04-26 15:41:13 +02:00
Bastian 5b7fd4986b Add preflight check: pool_exists 2023-03-23 16:14:04 +01:00
Bastian 41abd0429a Fix Regex to exclude cloud-init drive 2023-03-23 15:46:58 +01:00
Bastian 890567ad05 Remove vm from ha group before shutting down on migration 2023-03-23 14:10:55 +01:00
Bastian f5441f4c0b Sanitize cloud-init drives from the config 2023-03-22 15:22:36 +01:00
Bastian fb5b3a6d09 Merge pull request #1 from lephisto/feature-move: Add --migrate feature: near-live migrate between clusters 2023-03-22 14:42:05 +01:00
Bastian 5bf37e886c Add --migrate feature: near-live migrate between clusters 2023-03-22 14:40:01 +01:00
Bastian 010f04c412 make --vmids=num vorking with --prefixids, bump version 2022-12-06 14:20:28 +01:00
Bastian 13245fdf5e Add --jobname as mandatory parameter 2022-11-16 12:47:47 +01:00
Bastian ae641a3927 Add colors to central logging function 2022-11-15 16:21:36 +01:00
Bastian 2f3fa5a39f Add some decent coloring 2022-11-14 16:34:55 +01:00
Bastian 7f64f6abc8 Add Helper for bash colors 2022-11-14 15:43:14 +01:00
Bastian 99f3ced23c Fix Total Byte CCounter 2022-11-04 22:52:10 +01:00
Bastian d72e66a230 exec crossover 2022-11-04 22:38:37 +01:00
Bastian 88ccbc914f Fix locale error in human_readable() function 2022-11-04 22:37:40 +01:00
Bastian fa953c6dbc restore crossover 2022-11-01 19:52:25 +01:00
Bastian b60c086071 Fix counting issue, Readme 2022-11-01 19:26:56 +01:00
Bastian 21301838de Improve Performance metrics and logging 2022-11-01 17:07:08 +01:00
Bastian 3cbe62f246 Add .gitignore 2022-11-01 16:47:22 +01:00
Bastian ccd957c6ed logging improvements, human readable stuff 2022-10-28 21:29:41 +02:00
Bastian ce5660c186 Bump Version 2022-10-28 14:54:45 +02:00
Bastian aecea23afd Added Documentation for updated housekeeping rules 2022-10-28 14:53:56 +02:00
Bastian 812253a7e0 Rewrite of housekeeping, timebased retention introduced 2022-10-28 13:53:07 +02:00
Bastian f20e4c4f63 Added performance metrics, some message polishing 2022-10-28 11:35:36 +02:00
Bastian 1883342180 Unlock Source VM directly after fsfreeze 2022-10-28 10:46:06 +02:00
Bastian ccbc40511c Added Metric InfluxDB to InfluxDB 2022-10-27 16:55:47 +02:00
Bastian 0b0bdaec33 minor changes and fixes 2022-10-27 16:51:59 +02:00
Bastian 11261d6473 remove junk 2022-10-27 13:32:25 +02:00
Bastian 4cab0a5c26 Adjust Documentation 2022-10-26 17:03:35 +02:00
Bastian c229fbf21e Added option to regex-rewrite the VM config 2022-10-26 17:02:25 +02:00
Bastian 6c8b6d99ca Added functionality to set vmids=all to mirror all VMs on source 2022-10-26 13:47:27 +02:00
Bastian 512f7d664f Fix regression where multiple existing snaps on source prevented full copies to run. 2022-10-26 13:05:42 +02:00
Bastian bc2f6f34fc some fix to snapshotsize calculation 2022-10-24 16:39:28 +02:00
Bastian fd190a3622 Some magic to get a proper value for snapshot size 2022-10-24 15:10:30 +02:00
Bastian 9b29489dc8 Added estimation of Transfer times and amounts of data 2022-10-24 13:37:37 +02:00
Bastian 886512bd41 Fix confusion of PVE Pool name vs Ceph Pool name 2022-10-23 02:02:25 +02:00
Bastian 35110daf35 Fix cornercase where disks could be the same on different source pools 2022-10-22 20:10:50 +02:00
4 changed files with 756 additions and 189 deletions

.gitignore (vendored, new file, 1 line)

@@ -0,0 +1 @@
eve4pve-barc

README.md (203 lines changed)

@@ -2,7 +2,7 @@
[![License](https://img.shields.io/github/license/EnterpriseVE/eve4pve-barc.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html)
Cross-Pool (live) Replication and near-live migration for Proxmox VE
Cross-Pool asynchronous online-replication and near-live migration for Proxmox VE
```text
@@ -11,32 +11,43 @@ ______
| --| _| . |_ -|_ -| . | | | -_| _|
|_____|_| |___|___|___|___|\_/|___|_|
Cross Pool (live) replication and near-live migration for Proxmox VE
Cross Pool asynchronous online-replication and near-live migration for Proxmox VE
Usage:
crossover <COMMAND> [ARGS] [OPTIONS]
crossover help
crossover version
crossover mirror --vmid=<string> --destination=<destionationhost> --pool=<targetpool> --keeplocal=n --keepremote=n
crossover mirror --vmid=<string> --destination=<destinationhost> --pool=<targetpool> --keeplocal=[n][d|s] --keepremote=[n][d|s]
Commands:
version Show program version
help Show program help
mirror Replicate a stopped VM to another Cluster (full clone)
Options:
--vmid The source+target ID of the VM/CT, comma separated (eg. --vmid=100:100,101:101),
--destination 'Target PVE Host in target pool. e.g. --destination=pve04
--pool 'Ceph pool name in target pool. e.g. --pool=data
--keeplocal 'How many additional Snapshots to keep locally. e.g. --keeplocal=2
--keepremote 'How many additional Snapshots to keep remote. e.g. --keepremote=2
--online 'Allow online Copy
--nolock 'Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock 'Keep source VM locked on Transfer
--keep-dlock 'Keep VM locked after transfer on Destination
--overwrite 'Overwrite Destination
--protect 'Protect Ceph Snapshots
--debug 'Show Debug Output
--sshcipher SSH Cipher to use for transfer (default: aes128-gcm@openssh.com,aes128-cbc)
--vmid The source+target ID of the VM, comma separated (eg. --vmid=100:100,101:101)
(The possibility to specify a different target VMID exists so you don't collide with VMIDs on the
target cluster, and so you can mark mirrored VMs on the destination)
--prefixid Prefix for VMIDs on target System [optional]
--excludevmids Exclude VM IDs when using --vmid=all
--destination Target PVE Host in target pool. e.g. --destination=pve04
--pool Ceph pool name in target pool. e.g. --pool=data
--keeplocal How many additional Snapshots to keep locally, specified in seconds or days. e.g. --keeplocal=2d
--keepremote How many additional Snapshots to keep remote, specified in seconds or days. e.g. --keepremote=7d
--rewrite PCRE Regex to rewrite the Config Files (eg. --rewrite='s/(net0:)(.*)tag=([0-9]+)/\1\2tag=1/g' would
change the VLAN tag of net0 to 1).
--influxurl Influx API url (e.g. --influxurl=https://your-influxserver.com/api/)
--influxtoken Influx API token with write permission
--influxbucket Influx Bucket to write to (e.g. --influxbucket=telegraf/autogen)
Switches:
--online Allow online Copy
--nolock Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock Keep source VM locked on Transfer
--keep-dlock Keep VM locked after transfer on Destination
--overwrite Overwrite Destination
--protect Protect Ceph Snapshots
--debug Show Debug Output
Report bugs to the Github repo at https://github.com/lephisto/crossover/
```
@@ -75,17 +86,13 @@ It'll work according this scheme:
* Can keep multiple backups
* Retention policy: (eg. keep x snapshots on the source and y snapshots in the destination cluster)
* Rewrites VM configurations so they match the new VMID and/or poolname on the destination
## Protected / unprotected snapshot
!TBD!
You can protect Ceph snapshots with the corresponding Ceph/RBD flag to avoid accidental deletion
and thus damaging your chain. Keep in mind that Proxmox won't let you delete such VMs then, because
it's not aware of that parameter.
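For illustration, this is what the protect flag does at the plain `rbd` level (image and snapshot names are borrowed from the examples below):
```
rbd snap protect data/vm-100-disk-0@mirror-20221021180936
# a protected snapshot cannot be removed until it is unprotected again:
rbd snap unprotect data/vm-100-disk-0@mirror-20221021180936
```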
* Secure, encrypted transfer (SSH), so it's safe to mirror between datacenters without an additional VPN
* Near live-migrate: To move a VM from one Cluster to another, make an initial copy and re-run with --migrate. This will shut down the VM on the source cluster and start it on the destination cluster.
## Installation of prerequisites
```
apt install git
apt install git pv gawk jq curl
```
## Install the Script somewhere, e.g. to /opt
@@ -93,54 +100,141 @@ git clone https://github.com/lephisto/crossover/ /opt
```
## Usage
Ensure that you can freely ssh from the node you plan to mirror _from_ to _all_ nodes in the destination cluster, as well as to localhost.
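A quick way to set this up by hand (the node names are placeholders for your destination cluster):
```
# copy your key to one destination node, then verify passwordless login to all of them
ssh-copy-id root@pve04
for h in pve04 pve05 pve06; do ssh -o BatchMode=yes root@"$h" true && echo "$h ok"; done
```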
Mirror VM to another Cluster:
## Continuous replication between Clusters
Example 1: Mirror VM to another Cluster:
```
root@pve01:~/crossover# ./crossover mirror --vmid=100:10100 --destination=pve04 --pool=data2 --keeplocal=4 --keepremote=8 --overwrite --keep-dlock --online
Start mirror 2022-10-21 18:09:36
Transmitting Config for VM 100 to desination 10100
update VM 100: -lock backup
update VM 10100: -lock backup
VM 100 - Issuing fsfreeze-freeze to 100 on pve01
2
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20221021180936
Creating snap: 100% complete...done.
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20221021180936
Creating snap: 100% complete...done.
VM 100 - Issuing fsfreeze-thaw to 100 on pve01
2
Exporting image: 100% complete...done.
Importing image diff: 100% complete...done.
Houskeeping localhost data vm-100-disk-0, keeping previous 4 Snapshots
Removing snap: 100% complete...done.
Houskeeping pve04 data2 vm-10100-disk-0, keeping previous 8 Snapshots
Exporting image: 100% complete...done.
Importing image diff: 100% complete...done.
Houskeeping localhost data vm-100-disk-1, keeping previous 4 Snapshots
Removing snap: 100% complete...done.
Houskeeping pve04 data2 vm-10100-disk-1, keeping previous 8 Snapshots
Unlocking source VM 100
root@pve01:~/crossover#
root@pve01:~/crossover# ./crossover mirror --vmid=all --prefixid=99 --excludevmids=101 --destination=pve04 --pool=data2 --overwrite --online
ACTION: Onlinemirror
Start mirror 2022-11-01 19:21:44
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 99100 on Destination Host pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Transmitting Config for to destination pve04 VMID 99100
VM 100 - locked 100 [rc:0]
VM 99100 - locked 99100 [rc:0]
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20221101192144
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20221101192144
VM 100 - unlocked source VM 100 [rc:0]
VM 100 - I data/vm-100-disk-0@mirror-20221101192144: e:0:00:01 c:[ 227KiB/s] a:[ 227KiB/s] 372KiB
VM 100 - Housekeeping: localhost data/vm-100-disk-0, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20221101192032 (106s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-99100-disk-0-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-99100-disk-0-data@mirror-20221101192032 (108s) [rc:0]
VM 100 - Disk Summary: Took 2 Seconds to transfer 372.89 KiB in a incremental run
VM 100 - I data/vm-100-disk-1@mirror-20221101192144: e:0:00:00 c:[ 346 B/s] a:[ 346 B/s] 74.0 B
VM 100 - Housekeeping: localhost data/vm-100-disk-1, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20221101192032 (114s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-99100-disk-1-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-99100-disk-1-data@mirror-20221101192032 (115s) [rc:0]
VM 100 - Disk Summary: Took 1 Seconds to transfer 372.96 KiB in a incremental run
VM 99100 - Unlocking destination VM 99100
Finnished mirror 2022-11-01 19:22:30
Job Summary: Bytes transferd 2 bytes for 2 Disks on 1 VMs in 00 hours 00 minutes 46 seconds
VM Freeze OK/failed...: 1/0
RBD Snapshot OK/failed: 2/0
Full xmitted..........: 0 byte
Differential Bytes ...: 372.96 KiB
```
This example creates a mirror of VM 100 (in the source cluster) as VM 10100 (in the destination cluster) using the Ceph pool "data2" for storing all attached disks. It will keep 4 Ceph snapshots prior to the latest (5 in total) locally and 8 snapshots on the remote cluster. It will keep the VM on the target cluster locked to avoid an accidental start (which would cause split-brain issues), and it will do so even if the source VM is running.
The use case is that you might want to keep a cold-standby copy of a certain VM on another Cluster. If you need to start it on the target cluster you just have to unlock it with `qm unlock VMID` there.
Another use case could be that you want to migrate a VM from one cluster to another with the least downtime possible. Real live migration as you are used to within one cluster is hard to achieve cross-cluster, but you can easily make an initial copy while the VM is still running on the source cluster (fully transferring the block devices), shut it down on the source, run the mirror process again (which is much faster now because it only needs to transfer the diff since the initial snapshot) and start it up on the target cluster. This way the migration basically takes one boot plus a few seconds for transferring the incremental snapshot.
## Near-live Migration
To minimize downtime and achieve a near-live migration from one cluster to another, it's recommended to do an initial sync of a VM from the source to the destination cluster. After that, run the job again with the --migrate switch added. This causes the source VM to be shut down prior to snapshot + transfer, and to be restarted on the destination cluster as soon as the incremental transfer is complete. Using --migrate will always try to start the VM on the destination cluster.
Example 2: Near-live migrate a VM from one cluster to another (run the initial replication first, which works online, then run with --migrate to shut down on the source, incrementally copy, and start on the destination):
```
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online
ACTION: Onlinemirror
Start mirror 2023-04-26 15:02:24
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Transmitting Config for to destination pve04 VMID 100
VM 100 - locked 100 [rc:0] on source
VM 100 - locked 100 [rc:0] on destination
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426150224
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426150224
VM 100 - unlocked source VM 100 [rc:0]
VM 100 - F data/vm-100-disk-0@mirror-20230426150224: e:0:09:20 r: c:[36.6MiB/s] a:[36.6MiB/s] 20.0GiB [===============================>] 100%
VM 100 - created snapshot on 100 [rc:0]
VM 100 - Disk Summary: Took 560 Seconds to transfer 20.00 GiB in a full run
VM 100 - F data/vm-100-disk-1@mirror-20230426150224: e:0:00:40 r: c:[50.7MiB/s] a:[50.7MiB/s] 2.00GiB [===============================>] 100%
VM 100 - created snapshot on 100 [rc:0]
VM 100 - Disk Summary: Took 40 Seconds to transfer 22.00 GiB in a full run
VM 100 - Unlocking destination VM 100
Finnished mirror 2023-04-26 15:13:47
Job Summary: Bytes transferred 22.00 GiB for 2 Disks on 1 VMs in 00 hours 11 minutes 23 seconds
VM Freeze OK/failed.......: 1/0
RBD Snapshot OK/failed....: 2/0
RBD export-full OK/failed.: 2/0
RBD export-diff OK/failed.: 0/0
Full xmitted..............: 22.00 GiB
Differential Bytes .......: 0 Bytes
root@pve01:~/crossover# ./crossover mirror --jobname=migrate --vmid=100 --destination=pve04 --pool=data2 --online --migrate
ACTION: Onlinemirror
Start mirror 2023-04-26 15:22:35
VM 100 - Starting mirror for testubuntu
VM 100 - Checking for VM 100 on destination cluster pve04 /etc/pve/nodes/*/qemu-server
VM 100 - Migration requested, shutting down VM on pve01
VM 100 - locked 100 [rc:0] on source
VM 100 - locked 100 [rc:0] on destination
VM 100 - Creating snapshot data/vm-100-disk-0@mirror-20230426152235
VM 100 - Creating snapshot data/vm-100-disk-1@mirror-20230426152235
VM 100 - I data/vm-100-disk-0@mirror-20230426152235: e:0:00:03 c:[1.29MiB/s] a:[1.29MiB/s] 4.38MiB
VM 100 - Housekeeping: localhost data/vm-100-disk-0, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230323162532 (2930293s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426144911 (2076s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145632 (1637s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426145859 (1492s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-0@mirror-20230426150224 (1290s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-100-disk-0-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-0-data@mirror-20230426150224 (1293s) [rc:0]
VM 100 - Disk Summary: Took 4 Seconds to transfer 4.37 MiB in a incremental run
VM 100 - I data/vm-100-disk-1@mirror-20230426152235: e:0:00:00 c:[ 227 B/s] a:[ 227 B/s] 74.0 B
VM 100 - Housekeeping: localhost data/vm-100-disk-1, keeping Snapshots for 0s
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230323162532 (2930315s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426144911 (2098s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145632 (1659s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426145859 (1513s) [rc:0]
VM 100 - Removing Snapshot localhost data/vm-100-disk-1@mirror-20230426150224 (1310s) [rc:0]
VM 100 - Housekeeping: pve04 data2/vm-100-disk-1-data, keeping Snapshots for 0s
VM 100 - Removing Snapshot pve04 data2/vm-100-disk-1-data@mirror-20230426150224 (1313s) [rc:0]
VM 100 - Disk Summary: Took 2 Seconds to transfer 4.37 MiB in a incremental run
VM 100 - Unlocking destination VM 100
VM 100 - Starting VM on pve01
Finnished mirror 2023-04-26 15:24:25
Job Summary: Bytes transferred 4.37 MiB for 2 Disks on 1 VMs in 00 hours 01 minutes 50 seconds
VM Freeze OK/failed.......: 0/0
RBD Snapshot OK/failed....: 2/0
RBD export-full OK/failed.: 0/0
RBD export-diff OK/failed.: 2/0
Full xmitted..............: 0 Bytes
Differential Bytes .......: 4.37 MiB
```
## Things to check
From Proxmox VE Hosts you want to backup you need to be able to ssh passwordless to all other Cluster hosts, that may hold VM's or Containers. This goes for the source and for the destination Cluster.
From the Proxmox VE hosts you want to back up, you need to be able to ssh passwordless to all other cluster hosts that may hold VMs or Containers. This goes for the source and for the destination cluster. Double-check this.
This is required for the freeze/unfreeze and the lock/unlock functions, which have to be called locally on the host the guest is currently running on. Usually this works out of the box for the source cluster, but you may want to make sure that you can "ssh root@pvehost1...n" from every host to every other host in the cluster.
For the Destination Cluster you need to copy your ssh-key to the first host in the cluster, and login once to every node
in your cluster.
For the Destination Cluster you need to copy your ssh-key to the first host in the cluster, and login once to every node in your cluster.
Currently the preflight checks do not verify that the destination cluster has enough resources. Check beforehand that you don't exceed the maximum safe capacity of Ceph in the destination cluster.
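A manual check could look like this, assuming pve04 is a node in the destination cluster:
```
# overall and per-pool usage of the destination Ceph cluster
ssh root@pve04 ceph df
```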
## Unique Disk names
There are cases when the source VM has disks on different Ceph pools. In theory you can then have identical image names for different disks. Since all disk images are migrated to one destination pool, the names need to be unique. This tool detects such collisions in the preflight checks, skips the affected VMs and issues a warning. To solve this, give the images unique names, like vm-100-disk-0, vm-100-disk-1 and so on. `rbd mv` will help you, as sketched below.
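For example, renaming one of two clashing images (pool and image names are illustrative):
```
# rename the image from the second source pool so names stay unique after migration
rbd mv data2/vm-100-disk-0 data2/vm-100-disk-2
```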
## Some words about Snapshot consistency and what qemu-guest-agent can do for you
@@ -283,3 +377,4 @@ Ceph Documentation:
Proxmox Wiki:
https://pve.proxmox.com/wiki/

crossover (684 lines changed)

@@ -1,30 +1,77 @@
#!/bin/bash
#set -x
# Cross Pool Migration and incremental replication Tool for Proxmox VMs using Ceph.
# Author: Bastian Mäuser <bma@netz.org>
declare -r VERSION=0.3
declare -r NAME=$(basename "$0")
declare -r PROGNAME=${NAME%.*}
LC_ALL="en_US.UTF-8"
source rainbow.sh
# Predefine if you want
declare opt_influx_api_url=''
declare opt_influx_token=''
declare opt_influx_bucket=''
declare opt_influx_api_org=''
declare opt_influx_jobname=''
declare opt_influx_job_metrics='crossover_xmit'
declare opt_influx_summary_metrics='crossover_jobs'
name=$(basename "$0")
# readonly variables
declare -r NAME=$name
declare -r VERSION=0.9
declare -r PROGNAME=${NAME%.*}
declare -r PVE_DIR="/etc/pve"
declare -r PVE_NODES="$PVE_DIR/nodes"
declare -r QEMU='qemu-server'
declare -r QEMU_CONF_CLUSTER="$PVE_NODES/*/$QEMU"
declare -r EXT_CONF='.conf'
declare -r PVFORMAT_FULL='e:%t r:%e c:%r a:%a %b %p'
declare -r PVFORMAT_SNAP='e:%t c:%r a:%a %b'
logfile=$(mktemp)
declare -r LOG_FILE=$logfile
declare -r LOG_FILE=$(mktemp)
# associative global arrays
declare -A -g pvnode
declare -A -g dstpvnode
declare -A -g svmids
declare -A -g dvmids
declare -g scluster
declare -g dcluster
declare -g scephversion
declare -g dcephversion
# global integers
declare -g -i perf_freeze_ok=0
declare -g -i perf_freeze_failed=0
declare -g -i perf_ss_ok=0
declare -g -i perf_ss_failed=0
declare -g -i perf_full_ok=0
declare -g -i perf_full_failed=0
declare -g -i perf_diff_ok=0
declare -g -i perf_diff_failed=0
declare -g -i perf_bytes_full=0
declare -g -i perf_bytes_diff=0
declare -g -i perf_bytes_total=0
declare -g -i perf_vm_running=0
declare -g -i perf_vm_stopped=0
declare -g -i perf_snaps_removed=0
declare -g -i perf_vm_total=0
declare -g -i perf_vm_ok=0
# commandline parameters
declare opt_destination
declare opt_vm_ids=''
declare opt_snapshot_prefix='mirror-'
declare opt_rewrite=''
declare opt_pool='rbd'
declare opt_sshcipher='aes128-gcm@openssh.com,aes128-cbc'
declare opt_tag=''
declare -i opt_prefix_id
declare opt_exclude_vmids=''
declare -i opt_debug=0
declare -i opt_dry_run=0
declare -i opt_syslog=0
@@ -33,11 +80,17 @@ declare -i opt_keepslock=0
declare -i opt_keepdlock=0
declare -i opt_overwrite=0
declare -i opt_online=0
declare -i opt_keep_local=0
declare -i opt_keep_remote=0
declare -i opt_migrate=0
declare -i opt_noconfirm=0
declare opt_keep_local='0s'
declare opt_keep_remote='0s'
declare -r redstconf='^\/etc\/pve\/nodes\/(.*)\/qemu-server\/([0-9]+).conf$'
declare -r recephimg='([a-zA-Z0-9]+)\:(.*)'
declare -r restripsnapshots='/^$/,$d'
declare -r redateex='^([0-9]{4})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})$'
declare -r restripansicolor='s/\x1b\[[0-9;]*m//g'
function usage(){
shift
@@ -47,7 +100,7 @@ function usage(){
_____
| |___ ___ ___ ___ ___ _ _ ___ ___
| --| _| . |_ -|_ -| . | | | -_| _|
|_____|_| |___|___|___|___|\_/|___|_|
|_____|_| |___|___|___|___|\_/|___|_| v$VERSION
EOF
fi
@@ -67,22 +120,35 @@ Commands:
mirror Replicate a stopped VM to another Cluster (full clone)
Options:
--vmid The source+target ID of the VM/CT, comma separated (eg. --vmid=100:100,101:101),
--destination 'Target PVE Host in target pool. e.g. --destination=pve04
--pool 'Ceph pool name in target pool. e.g. --pool=data
--keeplocal 'How many additional Snapshots to keep locally. e.g. --keeplocal=2
--keepremote 'How many additional Snapshots to keep remote. e.g. --keepremote=2
--online 'Allow online Copy
--nolock 'Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock 'Keep source VM locked on Transfer
--keep-dlock 'Keep VM locked after transfer on Destination
--overwrite 'Overwrite Destination
--debug 'Show Debug Output
--sshcipher SSH Cipher to use for transfer (default: aes128-gcm@openssh.com,aes128-cbc)
--tag Include all VMs with a specific tag set in the Proxmox UI (if set, implies vmid=all)
--vmid The source+target ID of the VM/CT, comma separated (eg. --vmid=100:100,101:101), or 'all' for all VMs
--prefixid Prefix for VMIDs on target System [optional]
--excludevmids Exclude VM IDs when using --vmid=all
--destination Target PVE Host in target pool. e.g. --destination=pve04
--pool Ceph pool name in target pool. e.g. --pool=data [default=rbd]
--keeplocal How many additional Snapshots to keep locally. e.g. --keeplocal=2d
--keepremote How many additional Snapshots to keep remote. e.g. --keepremote=7d
--rewrite PCRE Regex to rewrite the Config Files (eg. --rewrite='s/(net0:)(.*)tag=([0-9]+)/\1\2tag=1/g' would
change the VLAN tag of net0 to 1).
--influxurl Influx API url (e.g. --influxurl=https://your-influxserver.com/api/)
--influxtoken Influx API token with write permission
--influxbucket Influx Bucket to write to (e.g. --influxbucket=telegraf/autogen)
--jobname Descriptive name for the job, used in Statistics
--mail Mail address to send report to, comma-separated (e.g. --mail=admin@test.com,admin2@test.com)
Switches:
--online Allow online Copy
--migrate Stop VM on Source Cluster before final Transfer and start on destination Cluster
--nolock Don't lock source VM on Transfer (mainly for test purposes)
--keep-slock Keep source VM locked on Transfer
--keep-dlock Keep VM locked after transfer on Destination
--overwrite Overwrite Destination
--noconfirm Don't ask for confirmation before starting --migrate mode (use with care!)
--debug Show Debug Output
Report bugs to <mephisto@mephis.to>
EOF
exit 1
}
function parse_opts(){
@@ -92,7 +158,7 @@ function parse_opts(){
local args
args=$(getopt \
--options '' \
--longoptions=vmid:,destination:,pool:,keeplocal:,keepremote:,online,nolock,keep-slock,keep-dlock,overwrite,dry-run,debug \
--longoptions=sshcipher:,tag:,vmid:,prefixid:,excludevmids:,destination:,pool:,keeplocal:,keepremote:,rewrite:,influxurl:,influxorg:,influxtoken:,influxbucket:,jobname:,mail:,online,migrate,nolock,keep-slock,keep-dlock,overwrite,dry-run,noconfirm,debug,syslog \
--name "$PROGNAME" \
-- "$@") \
|| end_process 128
@@ -101,18 +167,32 @@ function parse_opts(){
while true; do
case "$1" in
--sshcipher) opt_sshcipher=$2; shift 2;;
--tag) opt_tag=$2; shift 2;;
--vmid) opt_vm_ids=$2; shift 2;;
--prefixid) opt_prefix_id=$2; shift 2;;
--excludevmids) opt_exclude_vmids=$2; shift 2;;
--destination) opt_destination=$2; shift 2;;
--pool) opt_pool=$2; shift 2;;
--keeplocal) opt_keep_local=$2; shift 2;;
--keepremote) opt_keep_remote=$2; shift 2;;
--online) opt_online=1; shift 2;;
--rewrite) opt_rewrite=$2; shift 2;;
--influxurl) opt_influx_api_url=$2; shift 2;;
--influxorg) opt_influx_api_org=$2; shift 2;;
--influxtoken) opt_influx_token=$2; shift 2;;
--influxbucket) opt_influx_bucket=$2; shift 2;;
--jobname) opt_influx_jobname=$2; shift 2;;
--mail) opt_addr_mail="$2"; shift 2;;
--online) opt_online=1; shift ;;
--migrate) opt_migrate=1; shift ;;
--dry-run) opt_dry_run=1; shift;;
--noconfirm) opt_noconfirm=1; shift;;
--debug) opt_debug=1; shift;;
--nolock) opt_lock=0; shift;;
--keep-slock) opt_keepslock=1; shift;;
--keep-dlock) opt_keepdlock=1; shift;;
--overwrite) opt_overwrite=1; shift;;
--syslog) opt_syslog=1; shift;;
--) shift; break;;
*) break;;
esac
@@ -121,18 +201,85 @@ function parse_opts(){
if [ $opt_debug -eq 1 ]; then
log info "============================================"
log info "Proxmox Crosspool Migration: $VERSION";
log info "pid: $(cat /var/run/"$PROGNAME".pid)"
log info "============================================"
log info "Proxmox VE Version:"
pveversion
echowhite "$(pveversion)"
log info "============================================"
fi
[ -z "$opt_influx_jobname" ] && { log info "Jobname is not set."; end_process 1; }
if [ -n "$opt_keep_local" ]; then
if ! [[ ${opt_keep_local:(-1)} == "s" || ${opt_keep_local:(-1)} == "d" ]]; then
echo "--keeplocal: Parameter malformed. suffix s or d missing"
end_process 255
fi
fi
if [ -n "$opt_keep_remote" ]; then
if ! [[ ${opt_keep_remote:(-1)} == "s" || ${opt_keep_remote:(-1)} == "d" ]]; then
echo "--keepremote: Parameter malformed. suffix s or d missing"
end_process 255
fi
fi
if [ $opt_keepdlock -eq 1 ] && [ $opt_migrate -eq 1 ]; then
log error "--keepdlock/--migrate: Invalid parameter Combination: you can't keep the destination locked in near-live migration mode"
end_process 255
fi
if [ -n "$opt_tag" ] && [ -n "$opt_vm_ids" ] && [ "$opt_vm_ids" != "all" ]; then
log error "You can't use --tag and --vmid at the same time"
end_process 255
fi
[ -n "$opt_tag" ] && [ -z $opt_vm_ids ] && opt_vm_ids="all"
[ -z "$opt_vm_ids" ] && { log info "VM id is not set."; end_process 1; }
vm_ids=$(echo "$opt_vm_ids" | tr ',' "\n")
if [ "$opt_vm_ids" = "all" ]; then
local all=''
local data=''
local cnt=''
local ids=''
all=$(get_vm_ids "$QEMU_CONF_CLUSTER/*$EXT_CONF" "$LXC_CONF_CLUSTER/*$EXT_CONF")
log debug "all: $all"
all=$(echo "$all" | tr ',' "\n")
opt_exclude_vmids=$(echo "$opt_exclude_vmids" | tr ',' "\n")
for id in $all; do
# exact-match against the (newline-separated) exclude list, so id 10 does not also exclude 100
cnt=$(echo "$opt_exclude_vmids" | grep -cx "$id")
if [ "$cnt" -eq 0 ]; then
vm_ids=$(echo "$vm_ids$id:$opt_prefix_id$id,")
fi
done
vm_ids=$(echo "$vm_ids" | tr ',' "\n")
else
if [ ! -z $opt_prefix_id ]; then
ids=$(echo "$opt_vm_ids" | tr ',' "\n")
for id in $ids; do
vm_ids=$(echo "$vm_ids$id:$opt_prefix_id$id,")
done
vm_ids=$(echo "$vm_ids" | tr ',' "\n")
else
vm_ids=$(echo "$opt_vm_ids" | tr ',' "\n")
fi
fi
}
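# Render a byte count as a human-readable string in binary units (Bytes, KiB ... YiB)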
human_readable() {
b=${1:-0}; d=''; s=0; S=(Bytes {K,M,G,T,P,E,Z,Y}iB)
while ((b > 1024)); do
d="$(printf ".%02d" $((b % 1024 * 100 / 1024)))"
b=$((b / 1024))
let s++
done
echo "$b$d ${S[$s]}"
}
function map_vmids_to_host(){
@@ -166,7 +313,14 @@ function exist_file(){
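# Resolve the Ceph pool name behind a PVE storage entry by parsing /etc/pve/storage.cfg on the given host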
function lookupcephpool() {
pvehost=$1
pvepoolname=$2
res=$(ssh $pvehost cat /etc/pve/storage.cfg | sed -n "/rbd: $pvepoolname/,/^$/p" | grep pool | cut -d " " -f 2)
res=$(ssh $pvehost cat /etc/pve/storage.cfg | sed -n "/rbd: $pvepoolname/,/^$/p" | grep -E "\s+pool\s" | cut -d " " -f 2)
echo $res
}
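# Resolve the optional erasure-coded data-pool of a PVE storage entry (empty if none is configured)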
function lookupdatapool() {
pvehost=$1
pvepoolname=$2
res=$(ssh $pvehost cat /etc/pve/storage.cfg | sed -n "/rbd: $pvepoolname/,/^$/p" | grep -E "\s+data-pool\s" | cut -d " " -f 2)
echo $res
}
@@ -177,7 +331,9 @@ function get_vm_ids(){
while [ $# -gt 0 ]; do
for conf in $1; do
[ ! -e "$conf" ] && break
if [ -n "$opt_tag" ] && ! grep -qE "^tags:\s.*$opt_tag(;|$)" $conf; then
continue
fi
conf=$(basename "$conf")
[ "$data" != '' ] && data="$data,"
data="$data${conf%.*}"
@@ -188,20 +344,6 @@ function get_vm_ids(){
echo "$data"
}
function get_config_file(){
local file_config=''
if exist_file "$QEMU_CONF_CLUSTER/$vm_id$EXT_CONF"; then
file_config=$(ls $QEMU_CONF_CLUSTER/$vm_id$EXT_CONF)
else
log error "VM $vm_id - Unknown technology or VMID not found: $QEMU_CONF_CLUSTER/$vm_id$EXT_CONF"
end_process 128
fi
echo "$file_config"
}
function get_disks_from_config(){
local disks;
local file_config=$1
@@ -213,7 +355,7 @@ function get_disks_from_config(){
[[ "$line" == "" ]] && break
echo "$line"
done < "$file_config" | \
grep -P '^(?:((?:virtio|ide|scsi|sata|mp)\d+)|rootfs): ' | \
grep -P '^(?:((?:efidisk|virtio|ide|scsi|sata|mp|tpmstate)\d+)|rootfs): ' | \
grep -v -P 'cdrom|none' | \
grep -v -P 'backup=0' | \
awk '{ split($0,a,","); split(a[1],b," "); print b[2]}')
@@ -221,10 +363,35 @@ function get_disks_from_config(){
echo "$disks"
}
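# Count duplicate disk image names in a VM config; echoes 0 when all names are unique across pools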
function check_unique_disk_config() {
local file_config=$1
disks=$(while read -r line; do
[[ "$line" == "" ]] && break
echo "$line"
done < "$file_config" | \
grep -P '^(?:((?:efidisk|virtio|ide|scsi|sata|mp|tpmstate)\d+)|rootfs): ' | \
grep -v -P 'cdrom|none' | \
grep -v -P 'backup=0' | \
awk '{ split($0,a,","); split(a[1],b," "); print b[2]}'| wc -l)
uniquedisks=$(while read -r line; do
[[ "$line" == "" ]] && break
echo "$line"
done < "$file_config" | \
grep -P '^(?:((?:efidisk|virtio|ide|scsi|sata|mp|tpmstate)\d+)|rootfs): ' | \
grep -v -P 'cdrom|none' | \
grep -v -P 'backup=0' | \
awk '{ split($0,a,","); split(a[1],b," "); print b[2]}'|cut -d ':' -f 2 | sort -nr | uniq | wc -l)
# TBD: ^(vm|ct)-([0-9]+)-([a-z]+)-[\d]+.*$
difference=$(expr $disks - $uniquedisks)
echo "$difference"
}
function log(){
local level=$1
shift 1
local message=$*
local syslog_msg=''
case $level in
debug)
@@ -236,26 +403,32 @@ function log(){
info)
echo -e "$message";
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$message"
echo -e "$message" | sed -e 's/\x1b\[[0-9;]*m//g' >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$syslog_msg"
;;
warn)
echo "WARNING: $message" 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.warn "$message"
echo -n "$(echoyellow 'WARNING: ')"
echowhite "$message" 1>&2
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.warn "$syslog_msg"
;;
error)
echo "ERROR: $message" 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.err "$message"
echo -n "$(echored 'ERROR: ')"
echowhite "$message" 1>&2
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" -p daemon.err "$syslog_msg"
;;
*)
echo "$message" 1>&2
echo -e "$message" >> "$LOG_FILE";
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$message"
echo -e "$message" | sed -e ${restripansicolor} >> "$LOG_FILE";
syslog_msg=$(echo -e "$message" | sed -e ${restripansicolor})
[ $opt_syslog -eq 1 ] && logger -t "$PROGNAME" "$syslog_msg"
;;
esac
}
@@ -274,9 +447,36 @@ function mirror() {
parse_opts "$@"
local timestamp; timestamp=$(date +%Y%m%d%H%M%S)
local xmittype
local humantime
local vmname
local -i xmitrc
local -i ssrc
local -i freezerc
local -i unfreezerc
local -i startdisk
local -i enddisk
local -i startjob
local -i endjob
local -i vmcount=0
local -i diskcount=0
local -i vmdiskcount=0
local -i skipped_vm_count=0
local -i startdowntime
local -i enddowntime
local -i ga_ping
log info "ACTION: Onlinemirror"
local disp_perf_freeze_failed
local disp_perf_ss_failed
local disp_perf_full_failed
local disp_perf_diff_failed
log info "ACTION: $(echowhite Onlinemirror)"
log info "Start mirror $(date "+%F %T")"
startjob=$(date +%s)
get_ceph_version
log info "Local Ceph Version: $scephversion, Remote Ceph Version: $dcephversion"
#create pid file
local pid_file="/var/run/$PROGNAME.pid"
@@ -292,29 +492,69 @@ function mirror() {
end_process 1
fi
scluster=$(grep cluster_name /etc/pve/corosync.conf | cut -d " " -f 4)
dcluster=$(ssh "$opt_destination" grep cluster_name /etc/pve/corosync.conf | cut -d " " -f 4)
if [ $opt_migrate -eq 1 ] && [ $opt_noconfirm -eq 0 ]; then
echo "VM(s) $opt_vm_ids will subsequently be shutdown on [$scluster] and started on [$dcluster]"
read -p "Do you want to proceed? (yes/no) " yn
case $yn in
yes ) echo ok, we will proceed;;
no ) echo exiting...;
exit;;
* ) echo invalid response;
exit 1;;
esac
fi
map_source_to_destination_vmid
map_vmids_to_host
map_vmids_to_dsthost "$opt_destination"
if [ "$(check_pool_exist "$opt_pool")" -eq 0 ]; then
log error "Preflight check: Destination RBD-Pool $opt_pool does not exist."
end_process 255
fi
for vm_id in $svmids; do
local file_config; file_config=$(get_config_file)
[ -z "$file_config" ] && continue
file_config="$PVE_NODES/${pvnode[$vm_id]}/$QEMU/$vm_id.conf"
check_unique_disk_config "$file_config"
end_process 255
if [[ $(check_unique_disk_config "$file_config") -ge 1 ]]; then
log error "VM $vm_id - Preflight check: VM $vm_id has duplicate disk entries - skipping to next VM. Check Documentation to learn how to avoid this."
(( skipped_vm_count++ ))
continue
fi
if ! exist_file "$file_config"; then
log error "VM $vm_id - Preflight check: VM $vm_id does not exist on source cluster [$scluster] - skipping to next VM."
(( skipped_vm_count++ ))
continue
fi
ga_ping=$(gaping "$vm_id")
log debug "ga_ping: $ga_ping"
if [ "$ga_ping" -eq 255 ] ; then #vm running but no qemu-guest-agent answering
log error "VM $vm_id - Preflight check: VM $vm_id on source cluster [$scluster] has no qemu-guest-agent running - skipping to next VM."
(( skipped_vm_count++ ))
continue
fi
(( vmcount++ ))
local disk=''
dvmid=${dvmids[$vm_id]}
srcvmgenid=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf|grep vmgenid|sed -r -e 's/^vmgenid:\s(.*)/\1/')
dstvmgenid=$(ssh $opt_destination cat $PVE_NODES/"${dstpvnode[$dvmid]}"/$QEMU/"$dvmid".conf 2>/dev/null|grep vmgenid|sed -r -e 's/^vmgenid:\s(.*)/\1/')
log debug "Checking for VM $dvmid on Destination Host $opt_destination $QEMU_CONF_CLUSTER"
log debug "DVMID: $dvmid"
conf_on_destination=$(ssh $opt_destination "ls -d $QEMU_CONF_CLUSTER/$dvmid$EXT_CONF 2>/dev/null")
vmname=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf | sed -e ''$restripsnapshots'' | grep "name\:" | cut -d' ' -f 2)
log info "VM $vm_id - Starting mirror for $(echowhite "$vmname")"
srcvmgenid=$(cat $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf | sed -e ''$restripsnapshots'' | grep vmgenid | sed -r -e 's/^vmgenid:\s(.*)/\1/')
dstvmgenid=$(ssh "$opt_destination" cat $PVE_NODES/"${dstpvnode[$dvmid]}"/$QEMU/"$dvmid".conf 2>/dev/null | grep vmgenid | sed -e ''$restripsnapshots'' | sed -r -e 's/^vmgenid:\s(.*)/\1/')
log info "VM $vm_id - Checking for VM $dvmid on destination cluster $opt_destination $QEMU_CONF_CLUSTER"
log debug "DVMID:$dvmid srcvmgenid:$srcvmgenid dstvmgenid:$dstvmgenid"
conf_on_destination=$(ssh "$opt_destination" "ls -d $QEMU_CONF_CLUSTER/$dvmid$EXT_CONF 2>/dev/null")
[[ "$conf_on_destination" =~ $redstconf ]]
host_on_destination=${BASH_REMATCH[1]}
if [ $host_on_destination ]; then
dststatus=$(ssh root@${dstpvnode[$dvmid]} qm status $dvmid|cut -d' ' -f 2)
if [ $dststatus == "running" ]; then
log error "Destination VM is running. bailing out"
log error "VM is running on Destination Cluster [$dcluster]. bailing out"
end_process 255
fi
fi
@@ -330,134 +570,282 @@ function mirror() {
log error "Source VM genid ($srcvmgenid) doesn't match destination VM genid ($dstvmgenid). This should not happen. Bailing out.."
end_process 255
fi
log info "Transmitting Config for VM $vm_id to desination $dvmid"
log info "VM $vm_id - Transmitting Config for VM $vm_id to destination $opt_destination VMID $dvmid"
rewriteconfig $PVE_NODES/"${pvnode[$vm_id]}"/$QEMU/"$vm_id".conf $opt_destination "$opt_pool" $PVE_NODES/"$opt_destination"/$QEMU/"$dvmid".conf "$dvmid"
map_vmids_to_dsthost "$opt_destination"
fi
#--move so we need to shutdown and remove from ha group?
if [ $opt_migrate -eq 1 ]; then
log info "VM $vm_id - Migration requested, shutting down VM on ${pvnode[$vm_id]}"
if [ "$(get_ha_status "$vm_id")" == "started" ]; then
log info "VM $vm_id - remove from HA"
do_run "ha-manager remove $vm_id"
fi
do_run "ssh root@${pvnode[$vm_id]} qm shutdown $vm_id >/dev/null"
startdowntime=$(date +%s)
fi
#Lock on source + destination
if [ $opt_lock -eq 1 ]; then
ssh root@"${pvnode[$vm_id]}" qm set "$vm_id" --lock backup
do_run "ssh root@""${pvnode[$vm_id]}"" qm set ""$vm_id"" --lock backup" >/dev/null
log info "VM $vm_id - locked $vm_id [rc:$?] on source"
do_run "ssh root@""${dstpvnode[$dvmid]}"" qm set ""$dvmid"" --lock backup" >/dev/null
log info "VM $dvmid - locked $dvmid [rc:$?] on destination"
fi
#Freeze fs only if no migration running and qemu-guest-agent okay.
if [ $opt_migrate -eq 0 ] && [ $ga_ping -eq 0 ]; then
vm_freeze "$vm_id" "${pvnode[$vm_id]}" >/dev/null
freezerc=$?
if [ $freezerc -gt 0 ]; then
log warn "VM $vm_id - QEMU-Guest could not fsfreeze on guest."
(( perf_freeze_failed++ ))
else
(( perf_freeze_ok++ ))
fi
if [ $opt_lock -eq 1 ]; then
ssh root@"${dstpvnode[$dvmid]}" qm set "$dvmid" --lock backup
fi
#Freeze, take Rbd Snapshot then unfreeze
vm_freeze "$vm_id" "${pvnode[$vm_id]}"
for disk in $(get_disks_from_config "$file_config"); do
src_image_spec=$(get_image_spec "$disk")
create_snapshot "$src_image_spec@$opt_snapshot_prefix$timestamp"
create_snapshot "$src_image_spec@$opt_snapshot_prefix$timestamp" 2>/dev/null
ssrc=$?
if [ $ssrc -gt 0 ]; then
log warn "VM $vm_id - rbd snap failed."
(( perf_ss_failed++ ))
else
(( perf_ss_ok++ ))
fi
done
vm_unfreeze "$vm_id" "${pvnode[$vm_id]}"
if [ $opt_migrate -eq 0 ]; then
vm_unfreeze "$vm_id" "${pvnode[$vm_id]}" >/dev/null
unfreezerc=$?
if [ $unfreezerc -gt 0 ]; then
log error "VM $vm_id - QEMU-Guest could not fsunfreeze on guest."
fi
if [ ! $opt_keepslock -eq 1 ]; then
do_run "ssh root@${pvnode[$vm_id]} qm unlock $vm_id" >/dev/null
log info "VM $vm_id - unlocked source VM $vm_id [rc:$?]"
fi
fi
for disk in $(get_disks_from_config "$file_config"); do
log debug "VMID: $vm_id Disk: $disk DESTVMID: $dvmid"
(( diskcount++ ))
(( vmdiskcount++ ))
src_image_spec=$(get_image_spec "$disk")
log debug "src_image_spec: $src_image_spec"
[ -z "$src_image_spec" ] && continue
dst_image_spec=$(echo $src_image_spec | sed -r -e "s/([a-zA-Z0-9]+\/[a-zA-Z0-9]+\-)([0-9]+)(\-[a-zA-Z0-9]+\-[0-9]+)/\1$dvmid\3/")
dst_image_spec=$(echo $src_image_spec | sed -r -e "s/(.*\/[a-zA-Z0-9]+\-)([0-9]+)(\-[a-zA-Z0-9]+\-[0-9]+)/\1$dvmid\3/")
[ -z "$dst_image_spec" ] && continue
[[ $disk =~ $recephimg ]]
#src_image_pool=${BASH_REMATCH[1]}
# src_image_pool_pve=${BASH_REMATCH[1]}
src_image_pool=$(lookupcephpool "localhost" ${BASH_REMATCH[1]})
src_image_name=${BASH_REMATCH[2]}
[[ $dst_image_spec =~ ^[a-zA-Z0-9]+\/(.*)$ ]]
dst_image_name=${BASH_REMATCH[1]}
[[ $dst_image_spec =~ ^.*\/(.*)$ ]]
dst_image_name=${BASH_REMATCH[1]} #-$src_image_pool_pve
dst_image_pool=$(lookupcephpool $opt_destination $opt_pool)
echo "dst_image_pool: $dst_image_pool"
dst_data_pool=$(lookupdatapool $opt_destination $opt_pool)
if [ -n "$dst_data_pool" ]; then
dst_data_opt="--data-pool $dst_data_pool"
fi
snapshot_name="@$opt_snapshot_prefix$timestamp"
localsnapcount=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1|wc -l)
if [ $localsnapcount -ge 2 ]; then
# we have at least 2 local snapshots, so we can make an incremental copy
currentlocal=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1|tail -n 1)
localts=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1 | sed -r -e 's/.*@mirror-(.*)/\1/')
localts=$(rbd ls -l $src_image_pool | grep $src_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1 | sed -r -e 's/.*@'$opt_snapshot_prefix'(.*)/\1/')
fi
latestremote=$(ssh $opt_destination rbd ls -l $dst_image_pool | grep $dst_image_name@$opt_snapshot_prefix | cut -d ' ' -f 1|tail -n 1)
if [ $latestremote ]; then
if [ "$latestremote" ]; then
[[ $latestremote =~ ^.*@$opt_snapshot_prefix([0-9]+)$ ]]
latestremotets=${BASH_REMATCH[1]}
for ts in $localts; do
if [ $ts == $latestremotets ]; then
if [ "$ts" == "$latestremotets" ]; then
basets=$ts
fi
done
fi
if [ -z $basets ]; then
if [ -z "$basets" ]; then
xmittype='full'
log debug "No matching Snapshot found on destination - Full Copy $src_image_pool/$src_image_name$snapshot_name to $dst_image_pool/$dst_image_name"
xmitjob="rbd export --rbd-concurrent-management-ops 8 $src_image_pool/$src_image_name$snapshot_name --no-progress -|pv -r|ssh $opt_destination rbd import --image-format 2 - $dst_image_pool/$dst_image_name"
#snapts=$(echo $currentlocal | sed -r -e 's/.*@mirror-(.*)/\1/')
snapshotsize=$(rbd du --pretty-format --format json $src_image_pool/$src_image_name|jq '.images[] | select (.snapshot_id == null) | {provisioned_size}.provisioned_size'|tail -1)
log debug "snapsize: $snapshotsize "
xmitjob="rbd export --rbd-concurrent-management-ops 8 $src_image_pool/$src_image_name$snapshot_name --no-progress - | tee >({ wc -c; } >/tmp/$PROGNAME.$pid.$dst_image_pool-$dst_image_name.size) | pv -s $snapshotsize -F \"VM $vm_id - F $src_image_pool/$src_image_name$snapshot_name: $PVFORMAT_FULL\" | ssh -c $opt_sshcipher $opt_destination rbd import --image-format 2 - $dst_image_pool/$dst_image_name $dst_data_opt 2>/dev/null"
# create initial snapshot on destination
if ! do_run $xmitjob; then
log debug "xmitjob: $xmitjob"
startdisk=$(date +%s)
do_run "$xmitjob"
xmitrc=$? # capture the transfer rc before date overwrites $?
enddisk=$(date +%s)
if [ $xmitrc -ne 0 ]; then
log error "Transmitting Image failed"
(( perf_full_failed++ ))
return 1
else
(( perf_full_ok++ ))
fi
cmd="ssh $opt_destination rbd snap create $dst_image_pool/$dst_image_name$snapshot_name"
do_run $cmd
do_run "$cmd" 2>/dev/null
log info "VM $vm_id - created snapshot on $dvmid [rc:$?]"
perf_bytes_full=$(( perf_bytes_full + $(cat /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size) ))
else
xmittype='incremental'
log debug "Basecopy + snapshot on destination - let's just transfer the diff"
xmitjob="rbd export-diff --from-snap $opt_snapshot_prefix$basets $src_image_pool/$currentlocal - | ssh $opt_destination rbd import-diff - $dst_image_pool/$dst_image_name"
if ! do_run $xmitjob; then
log debug "sizer: rbd diff $src_image_pool/$currentlocal --from-snap $opt_snapshot_prefix$basets|gawk --bignum '{ SUM += \$2 } END { print SUM }'"
snapshotsize=$(rbd diff $src_image_pool/$currentlocal --from-snap $opt_snapshot_prefix$basets|gawk --bignum '{ SUM += $2 } END { print SUM }')
log debug "snapshotsize: $snapshotsize"
if [ -z "$snapshotsize" ]; then
#disk was not attached, or really nothing has changed..
snapshotsize=0
fi
xmitjob="rbd export-diff --no-progress --from-snap $opt_snapshot_prefix$basets $src_image_pool/$currentlocal - | tee >({ wc -c; } >/tmp/$PROGNAME.$pid.$dst_image_pool-$dst_image_name.size) | pv -F \"VM $vm_id - I $src_image_pool/$src_image_name$snapshot_name: $PVFORMAT_SNAP\" | ssh -c $opt_sshcipher $opt_destination rbd import-diff --no-progress - $dst_image_pool/$dst_image_name"
log debug "xmitjob: $xmitjob"
startdisk=$(date +%s)
do_run "$xmitjob"
xmitrc=$? # capture the transfer rc before date overwrites $?
enddisk=$(date +%s)
if [ $xmitrc -ne 0 ]; then
log error "Transmitting Image failed"
(( perf_diff_failed++ ))
return 1
else
(( perf_diff_ok++ ))
fi
do_housekeeping "localhost" "$src_image_pool" "$src_image_name" $opt_keep_local
do_housekeeping "$opt_destination" "$dst_image_pool" "$dst_image_name" $opt_keep_remote
do_housekeeping "localhost" "$src_image_pool" "$src_image_name" "$opt_keep_local" "$vm_id"
do_housekeeping "$opt_destination" "$dst_image_pool" "$dst_image_name" "$opt_keep_remote" "$vm_id"
perf_bytes_diff=$(( perf_bytes_diff + $(cat /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size) ))
fi
perf_bytes_total=$(( perf_bytes_total + $(cat /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size) ))
rm /tmp/"$PROGNAME"."$pid"."$dst_image_pool"-"$dst_image_name".size
log info "VM $vm_id - Disk Summary: Took $(( enddisk - startdisk )) Seconds to transfer $(human_readable "$perf_bytes_total" 2) in a $xmittype run"
if [ -n "$opt_influx_api_url" ]; then
log info "VM $vm_id - Logging to InfluxDB: $opt_influx_api_url"
influxlp="$opt_influx_job_metrics,vmname=$vmname,jobname=$opt_influx_jobname,destination=$opt_destination,srcimage=$src_image_name,dstimage=$dst_image_name,xmittype=$xmittype bytescalculated=$snapshotsize""i,bytesonwire=$perf_bytes_total""i,xmitrc=$xmitrc""i,freezerc=$freezerc""i,unfreezerc=$unfreezerc""i,basets=$basets""i"
log debug "InfluxLP: --->\n $influxlp"
cmd="curl --request POST \"$opt_influx_api_url/v2/write?org=$opt_influx_api_org&bucket=$opt_influx_bucket&precision=ns\" --header \"Authorization: Token $opt_influx_token\" --header \"Content-Type: text/plain; charset=utf-8\" --header \"Accept: application/json\" --data-binary '$influxlp'"
do_run "$cmd"
fi
unset basets
vmdiskcount=0
done
if [ ! $opt_keepslock -eq 1 ]; then
ssh root@${pvnode[$vm_id]} qm unlock $vm_id
log info "Unlocking source VM $vm_id"
fi
if [ $opt_keepdlock -eq 0 ]; then
ssh root@${dstpvnode[$dvmid]} qm unlock $dvmid
log info "Unlocking destination VM $dvmid"
log info "VM $dvmid - Unlocking destination VM $dvmid"
fi
#--migrate so start on destination?
if [ $opt_migrate -eq 1 ]; then
log info "VM $dvmid - Starting VM on node ${dstpvnode[$dvmid]} in cluster [$dcluster]"
do_run "ssh root@""${dstpvnode[$dvmid]}"" qm start "$dvmid >/dev/null
enddowntime=$(date +%s)
log info "VM $dvmid - Downtime: $(( enddowntime - startdowntime )) Seconds"
fi
done
endjob=$(date +%s)
log info "Finnished mirror $(date "+%F %T")"
humantime=$(date -ud "@$((endjob-startjob))" +'%H hours %M minutes %S seconds')
log info "Job Summary: Bytes transferred $(human_readable $perf_bytes_total) for $diskcount Disks on $vmcount VMs in $humantime"
if [ "$perf_freeze_failed" -gt 0 ]; then disp_perf_freeze_failed="$(echored $perf_freeze_failed)"; else disp_perf_freeze_failed="$(echogreen $perf_freeze_failed)"; fi
if [ "$perf_ss_failed" -gt 0 ]; then disp_perf_ss_failed="$(echored $perf_ss_failed)"; else disp_perf_ss_failed="$(echogreen $perf_ss_failed)"; fi
if [ "$perf_full_failed" -gt 0 ]; then disp_perf_full_failed="$(echored $perf_full_failed)"; else disp_perf_full_failed="$(echogreen $perf_full_failed)"; fi
if [ "$perf_diff_failed" -gt 0 ]; then disp_perf_diff_failed="$(echored $perf_diff_failed)"; else disp_perf_diff_failed="$(echogreen $perf_diff_failed)"; fi
if [ "$skipped_vm_count" -gt 0 ]; then disp_skipped_vm_count="$(echored $skipped_vm_count)"; else disp_skipped_vm_count="$(echogreen $skipped_vm_count)"; fi
log info "VM Freeze OK/failed.......: $perf_freeze_ok/$disp_perf_freeze_failed"
log info "RBD Snapshot OK/failed....: $perf_ss_ok/$disp_perf_ss_failed"
log info "RBD export-full OK/failed.: $perf_full_ok/$disp_perf_full_failed"
log info "RBD export-diff OK/failed.: $perf_diff_ok/$disp_perf_diff_failed"
log info "Full xmitted..............: $(human_readable $perf_bytes_full)"
log info "Differential Bytes .......: $(human_readable $perf_bytes_diff)"
log info "Skipped VMs ..............: $disp_skipped_vm_count"
if [ -n "$opt_influx_api_url" ]; then
log info "VM $vm_id - Logging Job summary to InfluxDB: $opt_influx_api_url"
influxlp="$opt_influx_summary_metrics,jobname=$opt_influx_jobname perf_bytes_diff=$perf_bytes_diff""i,perf_bytes_full=$perf_bytes_full""i,perf_bytes_total=$perf_bytes_total""i,perf_diff_failed=$perf_diff_failed""i,perf_diff_ok=$perf_diff_ok""i,perf_freeze_failed=$perf_freeze_failed""i,perf_freeze_ok=$perf_freeze_ok""i,perf_full_failed=$perf_full_failed""i,perf_full_ok=$perf_full_ok""i,perf_ss_failed=$perf_ss_failed""i,perf_ss_ok=$perf_ss_ok""i,perf_vm_running=$perf_vm_running""i,perf_vm_stopped=$perf_vm_stopped""i"
log debug "InfluxLP: --->\n $influxlp"
cmd="curl --request POST \"$opt_influx_api_url/v2/write?org=$opt_influx_api_org&bucket=$opt_influx_bucket&precision=ns\" --header \"Authorization: Token $opt_influx_token\" --header \"Content-Type: text/plain; charset=utf-8\" --header \"Accept: application/json\" --data-binary '$influxlp'"
do_run "$cmd"
fi
(( perf_vm_ok++ ))
end_process 0
}
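# Time-based housekeeping: remove mirror snapshots older than the keep window ([n]s or [n]d) on the given host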
function do_housekeeping(){
local horst=$1
local rbdpool=$2
local rbdimage=$3
local keep=$4
local vm=$5
local snap
local -i keeptime
local -i ts
local -i snapepoch
local -i age
log info "VM $vm - Housekeeping: $horst $rbdpool/$rbdimage, keeping Snapshots for $keep"
cmd="ssh $horst rbd ls -l $rbdpool | grep $rbdimage@$opt_snapshot_prefix | cut -d ' ' -f 1|head -n -1"
snapshots=$($cmd)
if [ "${keep:(-1)}" == "d" ]; then
keep=${keep%?}
keeptime=$(( $keep * 86400 ))
elif [ "${keep:(-1)}" == "s" ]; then
keep=${keep%?}
keeptime=$keep
fi
for snap in $snapshots; do
[[ $snap =~ ^.*@$opt_snapshot_prefix([0-9]+)$ ]]
ts=${BASH_REMATCH[1]}
[[ $ts =~ $redateex ]]
snapepoch=$(date --date "${BASH_REMATCH[1]}/${BASH_REMATCH[2]}/${BASH_REMATCH[3]} ${BASH_REMATCH[4]}:${BASH_REMATCH[5]}:${BASH_REMATCH[6]}" +%s)
age=$(($(date -u +"%s")-$snapepoch ))
if [ $age -gt "$keeptime" ]; then
cmd="ssh $horst rbd snap rm $rbdpool/$snap"
do_run "$cmd" 2>/dev/null
log info "VM $vm_id - Removing Snapshot $horst $rbdpool/$snap ($age""s) [rc:$?]"
if [ $rc -eq 0 ]; then
(( perf_snaps_removed++ ))
fi
fi
done
}
function do_housekeeping(){
horst=$1
rbdpool=$2
rbdimage=$3
keep=$4
snapshotstokill=$(ssh $horst rbd ls -l $rbdpool | grep $rbdimage@$opt_snapshot_prefix | cut -d ' ' -f 1|head -n -1 |head -n -$keep)
log info "Houskeeping $horst $rbdpool $rbdimage, keeping previous $keep Snapshots"
for snap in $snapshotstokill; do
cmd="ssh $horst rbd snap rm $rbdpool/$snap"
if ! do_run $cmd; then
log error "Housekeeping failed: $cmd"
return 1
fi
done
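# Ping the qemu-guest-agent of a VM on its current host; echoes the qm rc (0 = agent answering, 255 = none)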
function gaping() {
local vmid=$1
local rc
cmd="ssh root@${pvnode[$vmid]} qm guest cmd $vmid ping >/dev/null 2>&1"
eval "$cmd"
rc=$?
echo $rc
}
function create_snapshot(){
local snap="$1"
log info "VM $vm_id - Creating snapshot $snap"
if ! do_run "rbd snap create $snap"; then
return 1;
fi
do_run "rbd snap create $snap"
rc=$?
log debug "create_snapshot() return $rc"
return $rc
}
function vm_freeze() {
local fvm=$1;
local fhost=$2;
status=$(ssh root@$fhost qm status $fvm|cut -d' ' -f 2)
status=$(ssh root@"$fhost" qm status "$fvm"|cut -d' ' -f 2)
if ! [[ "$status" == "running" ]]; then
log info "VM $fvm - Not running, skipping fsfreeze-freeze"
(( perf_vm_stopped++ ))
return
else
(( perf_vm_running++ ))
fi
local cmd="ssh root@$fhost /usr/sbin/qm guest cmd $fvm fsfreeze-freeze"
log info "VM $fvm - Issuing fsfreeze-freeze to $fvm on $fhost"
do_run "$cmd"
rc=$?
log debug "vm_freeze() return $rc"
return $rc
}
function vm_unfreeze() {
local fvm=$1;
local fhost=$2;
status=$(ssh root@$fhost qm status $fvm|cut -d' ' -f 2)
status=$(ssh root@"$fhost" qm status "$fvm"|cut -d' ' -f 2)
if ! [[ "$status" == "running" ]]; then
log info "VM $fvm - Not running, skipping fsfreeze-thaw"
return
@@ -467,6 +855,7 @@ function vm_unfreeze() {
do_run "$cmd"
rc=$?
log debug "vm_unfreeze() return $rc"
return $rc
}
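# Rewrite a VM config for the destination: remap disk entries to the new pool/VMID, apply the optional
# --rewrite regex, and strip snapshot sections, cloud-init drives and parent references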
function rewriteconfig(){
@@ -475,7 +864,13 @@ function rewriteconfig(){
local newpool=$3
local newconfig=$4
local newvmid=$5
cat "$oldconfig" | sed -r -e "s/^(virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+),(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6,\7/g" | ssh $dst "cat - >$newconfig"
local sedcmd
if [ ! -z "$opt_rewrite" ]; then
sedcmd='sed -r -e '$opt_rewrite
else
sedcmd='sed -e /^$/,$d'
fi
cat "$oldconfig" | sed -r -e "s/^(efidisk|virtio|ide|scsi|sata|mp)([0-9]+):\s([a-zA-Z0-9]+):(.*)-([0-9]+)-disk-([0-9]+).*,(.*)$/\1\2: $newpool:\4-$newvmid-disk-\6,\7/g" | $sedcmd | sed -e '/^$/,$d' | sed -e '/ide[0-9]:.*-cloudinit,media=cdrom.*/d' | grep -v "^parent:\s.*$" | ssh "$dst" "cat - >$newconfig"
}
function checkvmid(){
@@ -483,7 +878,7 @@ function checkvmid(){
local vmid=$2
cmd="ssh $dst ls -l $QEMU_CONF_CLUSTER/$vmid.conf|wc -l"
rval=$($cmd)
echo $rval
echo "$rval"
}
function do_run(){
@@ -506,23 +901,25 @@ function do_run(){
function end_process(){
local -i rc=$1;
# if ! [[ -z "$startts" && -z "$endts" ]]; then
# local -i runtime=$(expr $endts - $startts)
# local -i bps=$(expr $bytecount/$runtime)
# fi
# local subject="Ceph [VM:$vmok/$vmtotal SS:$snapshotok/$snapshottotal EX:$exportok/$exporttotal] [$(bytesToHuman "$bytecount")@$(bytesToHuman "$bps")/s]"
# [ $rc != 0 ] && subject="$subject [ERROR]"
local -i runtime
local -i bps
local -i ss_total
local subject
if ! [[ -z "$startjob" || -z "$endjob" ]]; then
runtime=$(( endjob - startjob ))
bps=$(( runtime > 0 ? perf_bytes_total / runtime : 0 )) # guard against division by zero
fi
ss_total=$(expr $perf_ss_ok + $perf_ss_failed)
subject="Crossover [VM:$perf_vm_ok/$vmcount SS:$perf_ss_ok/$ss_total]"
[ $rc != 0 ] && subject="[ERROR] $subject" || subject="[OK] $subject"
#send email
# local mail;
# local mailhead="Backup $imgcount Images in $vmcount VMs (Bytes: $bytecount)"
# for mail in $(echo "$opt_addr_mail" | tr "," "\n"); do
# do_run "cat '$LOG_FILE' | mail -s '$subject' '$mail'"
# done
local mail;
for mail in $(echo "$opt_addr_mail" | tr "," "\n"); do
do_run "cat '$LOG_FILE' | mail -s '$subject' '$mail'"
done
#remove log
# rm "$LOG_FILE"
rm "$LOG_FILE"
exit "$rc";
}
@@ -539,6 +936,29 @@ function get_image_spec(){
echo "$image_spec"
}
function get_ha_status() {
local havmid="$1"
ha_status=$(ha-manager status| grep vm:"$havmid" | cut -d " " -f 4| sed 's/.$//')
echo "$ha_status"
}
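# Echo 1 if an RBD storage with the given name exists on the destination host, else 0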
function check_pool_exist() {
local poolname="$1"
local -i exists=255
pool_status=$(ssh $opt_destination pvesm status|grep rbd|cut -d " " -f 1|grep $poolname)
if [ "$pool_status" == "$poolname" ]; then
exists=1
else
exists=0
fi
echo $exists
}
function get_ceph_version() {
scephversion=$(ceph -v | cut -d " " -f 3)
dcephversion=$(ssh $opt_destination ceph -v | cut -d " " -f 3)
}
function main(){
[ $# = 0 ] && usage;

rainbow.sh (new file, 51 lines)

@@ -0,0 +1,51 @@
# https://github.com/xr09/rainbow.sh
# Bash helper functions to put colors on your scripts
#
# Usage example:
# vargreen=$(echogreen "Grass is green")
# echo "Coming next: $vargreen"
#
__RAINBOWPALETTE="1"
function __colortext()
{
echo -e " \e[$__RAINBOWPALETTE;$2m$1\e[0m"
}
function echogreen()
{
echo $(__colortext "$1" "32")
}
function echored()
{
echo $(__colortext "$1" "31")
}
function echoblue()
{
echo $(__colortext "$1" "34")
}
function echopurple()
{
echo $(__colortext "$1" "35")
}
function echoyellow()
{
echo $(__colortext "$1" "33")
}
function echocyan()
{
echo $(__colortext "$1" "36")
}
function echowhite()
{
echo $(__colortext "$1" "37")
}