Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled
- ADD_CHAIN138_TO_LEDGER_LIVE: Ledger form done; public code review repo bis-innovations/LedgerLive; init/push commands - CONTRACT_DEPLOYMENT_RUNBOOK: Chain 138 gas price 1 gwei, 36-addr check, TransactionMirror workaround - CONTRACT_*: AddressMapper, MirrorManager deployed 2026-02-12; 36-address on-chain check - NEXT_STEPS_FOR_YOU: Ledger done; steps completable now (no LAN); run-completable-tasks-from-anywhere - MASTER_INDEX, OPERATOR_OPTIONAL, SMART_CONTRACTS_INVENTORY_SIMPLE: updates - LEDGER_BLOCKCHAIN_INTEGRATION_COMPLETE: bis-innovations/LedgerLive reference Co-authored-by: Cursor <cursoragent@cursor.com>
249 lines
8.2 KiB
Bash
Executable File
#!/usr/bin/env bash
set -euo pipefail

# Migrate containers VMIDs 100-1000 from r630-02 to r630-01
# These containers have data and will replace empty volumes on r630-01
#
# Strategy: vzdump backup on the source node, copy the archive to the
# target node, then 'pct restore' onto the target storage (storage names
# differ between the nodes, so a live 'pct migrate' is not used).
#
# The redundant 'set -u' that used to follow these comments was removed:
# it is already enabled by 'set -euo pipefail' above.
|
|
|
|
# --- Node configuration ------------------------------------------------
# NOTE(review): plaintext root passwords are hardcoded here — prefer SSH
# keys (ssh-copy-id) and drop sshpass; anyone who can read this file
# gets root on both hypervisors.
readonly SOURCE_NODE="r630-02"
readonly SOURCE_NODE_IP="192.168.11.12"
readonly SOURCE_NODE_PASS="password"

readonly TARGET_NODE="r630-01"
readonly TARGET_NODE_IP="192.168.11.11"
readonly TARGET_NODE_PASS="password"

# Target storage - use thin1 for best performance
readonly TARGET_STORAGE="thin1"

# VMID range (inclusive) of containers to migrate
readonly VMID_START=100
readonly VMID_END=1000

# ANSI color codes, stored as literal '\033' text and rendered by the
# log helpers.
readonly GREEN='\033[0;32m'
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m'   # no color / reset
|
|
|
|
# Logging helpers. The color variables hold literal '\033[...' text, so
# they are rendered with printf's %b; the message itself is printed with
# %s so that backslash sequences in data (e.g. container names) are NOT
# interpreted — the original 'echo -e' expanded escapes in the message
# too, which could mangle arbitrary input.
log_info()    { printf '%b[INFO]%b %s\n'  "$BLUE"   "$NC" "$1"; }
log_success() { printf '%b[✓]%b %s\n'     "$GREEN"  "$NC" "$1"; }
log_error()   { printf '%b[✗]%b %s\n'     "$RED"    "$NC" "$1"; }
log_warn()    { printf '%b[⚠]%b %s\n'     "$YELLOW" "$NC" "$1"; }
log_header()  { printf '%b=== %s ===%b\n' "$CYAN" "$1" "$NC"; }
|
|
|
|
# SSH helpers

# Run a command on the source node (r630-02) as root.
# Password auth via sshpass; stderr is merged into stdout, so callers
# that capture output also receive any SSH diagnostics.
ssh_r630_02() {
  local -a conn_opts=(-o StrictHostKeyChecking=no -o ConnectTimeout=10)
  sshpass -p "$SOURCE_NODE_PASS" \
    ssh "${conn_opts[@]}" "root@${SOURCE_NODE_IP}" "$@" 2>&1
}
|
|
|
|
# Run a command on the target node (r630-01) as root.
# Password auth via sshpass; stderr is merged into stdout, so callers
# that capture output also receive any SSH diagnostics.
ssh_r630_01() {
  local -a conn_opts=(-o StrictHostKeyChecking=no -o ConnectTimeout=10)
  sshpass -p "$TARGET_NODE_PASS" \
    ssh "${conn_opts[@]}" "root@${TARGET_NODE_IP}" "$@" 2>&1
}
|
|
|
|
# Check if container exists on source.
# Returns 0 if the VMID appears in 'pct list' on the source node.
container_exists_on_source() {
    local vmid="$1"
    # Anchor with trailing whitespace: the old pattern '^$vmid' was a
    # prefix match, so checking VMID 100 also matched 1000-1009.
    # (The redundant '&& return 0 || return 1' was dropped — the
    # function's status is grep's status either way.)
    ssh_r630_02 "pct list 2>/dev/null | grep -q '^$vmid[[:space:]]'"
}
|
|
|
|
# Check if container exists on target.
# Returns 0 if the VMID appears in 'pct list' on the target node.
container_exists_on_target() {
    local vmid="$1"
    # Anchor with trailing whitespace: the old pattern '^$vmid' was a
    # prefix match, so checking VMID 100 also matched 1000-1009.
    ssh_r630_01 "pct list 2>/dev/null | grep -q '^$vmid[[:space:]]'"
}
|
|
|
|
# Get container status on the source node (e.g. "running"/"stopped"),
# printing "unknown" when it cannot be determined.
get_container_status() {
    local vmid="$1" status
    # The remote pipeline's exit status is awk's, which succeeds even
    # when 'pct status' fails — so the original '|| echo unknown' never
    # fired for a missing container and an empty string was printed.
    # Capture the output and fall back to "unknown" when it is empty.
    status=$(ssh_r630_02 "pct status $vmid 2>/dev/null | awk '{print \$2}'") || status=""
    echo "${status:-unknown}"
}
|
|
|
|
# Get the container's hostname from its config on the source node,
# printing "unknown" when it cannot be determined.
get_container_name() {
    local vmid="$1" name
    # As with get_container_status, the remote pipeline ends in awk and
    # succeeds even when 'pct config' or grep fail, so the original
    # '|| echo unknown' never fired and an empty string was printed.
    name=$(ssh_r630_02 "pct config $vmid 2>/dev/null | grep '^hostname:' | awk '{print \$2}'") || name=""
    echo "${name:-unknown}"
}
|
|
|
|
# Check if target storage is available (listed as active by 'pvesm
# status' on the target node). Returns 0 when available, 1 otherwise.
check_target_storage() {
    log_info "Checking target storage $TARGET_STORAGE on $TARGET_NODE..."
    # Anchor the storage name at line start followed by whitespace: the
    # old unanchored '$TARGET_STORAGE.*active' also matched storages
    # whose names merely start with the same text (e.g. "thin10" when
    # checking "thin1").
    if ssh_r630_01 "pvesm status | grep -q '^$TARGET_STORAGE[[:space:]].*active'"; then
        log_success "Target storage $TARGET_STORAGE is available"
        return 0
    else
        log_error "Target storage $TARGET_STORAGE is not available on $TARGET_NODE"
        return 1
    fi
}
|
|
|
|
# Migrate a single container from the source to the target node via
# vzdump backup + scp + 'pct restore' (a live 'pct migrate' is not used
# because the storage names differ between the nodes).
#
# Arguments: $1 - container VMID
# Returns:   0 on success, or when the container is absent on the source
#            1 on any failure (backup, file copy, or restore)
#
# WARNING: destructive — destroys any same-VMID container on the target,
# and destroys the source container before the restore has been
# verified; after that point only the backup archive holds the data.
migrate_container() {
    local vmid="$1"
    local name=$(get_container_name "$vmid")
    local status=$(get_container_status "$vmid")

    log_header "Migrating Container $vmid: $name"

    # Check if container exists on source
    if ! container_exists_on_source "$vmid"; then
        log_warn "Container $vmid does not exist on $SOURCE_NODE, skipping..."
        return 0
    fi

    # Check if container already exists on target; it is removed first
    # because 'pct restore' will not overwrite an existing VMID.
    if container_exists_on_target "$vmid"; then
        log_warn "Container $vmid already exists on $TARGET_NODE"
        log_info "Destroying existing container on target to allow restore..."
        ssh_r630_01 "pct destroy $vmid --force" || log_warn "Failed to destroy container $vmid on target"
        sleep 2
    fi

    # Check if container is running; remember so it can be started again
    # on the target after the restore.
    local was_running=false
    if [ "$status" = "running" ]; then
        was_running=true
        log_info "Container $vmid is running, will stop before migration..."
        ssh_r630_02 "pct stop $vmid" || log_warn "Failed to stop container $vmid"
        sleep 3
    fi

    # Clear any locks left behind by earlier (failed) operations.
    # Best-effort: each step deliberately ignores failure ('|| true').
    log_info "Clearing locks for container $vmid..."
    ssh_r630_02 "pvesh delete /nodes/$SOURCE_NODE/lxc/$vmid/lock 2>/dev/null" || true
    ssh_r630_02 "rm -f /var/lock/pve-manager/lxc-$vmid.lock 2>/dev/null" || true
    ssh_r630_02 "pkill -f 'vzmigrate.*$vmid' 2>/dev/null" || true
    sleep 2

    # Use backup/restore method since storage names may not match
    log_info "Creating backup of container $vmid on $SOURCE_NODE..."
    log_info "This may take 5-15 minutes depending on container size..."

    # Create backup on source node ('--mode stop' for a consistent dump;
    # the container was already stopped above if it was running)
    if ssh_r630_02 "vzdump $vmid --storage local --compress gzip --mode stop"; then
        log_success "Backup created successfully"

        # Find the backup file: newest .tar.gz dump for this VMID.
        # NOTE(review): 'ls -t | head -1' picks the most recently
        # modified matching archive — presumably the one just created;
        # a newer pre-existing file for the same VMID would be picked
        # instead. Confirm the dump directory is not shared with other
        # backup jobs.
        local backup_file=$(ssh_r630_02 "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" || echo "")

        if [ -z "$backup_file" ]; then
            log_error "Could not find backup file for container $vmid"
            return 1
        fi

        log_info "Found backup: $backup_file"

        # Copy backup to target
        log_info "Copying backup to $TARGET_NODE..."
        ssh_r630_01 "mkdir -p /var/lib/vz/dump" || true

        local backup_name=$(basename "$backup_file")
        local target_backup="/var/lib/vz/dump/$backup_name"

        # NOTE(review): remote-to-remote scp without '-3' makes the
        # source node connect directly to the target; sshpass only
        # supplies the password for the local->source hop, so this
        # relies on the source node being able to authenticate to the
        # target on its own (keys/agent). Confirm, or add '-3' to relay
        # the copy through the machine running this script.
        if sshpass -p "$SOURCE_NODE_PASS" scp -o StrictHostKeyChecking=no root@"$SOURCE_NODE_IP:$backup_file" root@"$TARGET_NODE_IP:$target_backup"; then
            log_success "Backup copied to $TARGET_NODE"
        else
            log_error "Failed to copy backup file"
            return 1
        fi

        # Destroy container on source before restore (Proxmox requires this)
        # NOTE(review): from here until the restore succeeds, the backup
        # archive is the only remaining copy of the container's data.
        log_info "Destroying container $vmid on source node (required for restore)..."
        ssh_r630_02 "pct destroy $vmid --force" || log_warn "Failed to destroy container on source (may not exist)"
        sleep 2

        # Restore container on target
        log_info "Restoring container $vmid on $TARGET_NODE..."
        if ssh_r630_01 "pct restore $vmid $target_backup --storage $TARGET_STORAGE"; then
            log_success "Container restored successfully"
        else
            log_error "Failed to restore container $vmid"
            log_warn "Container may need to be manually restored"
            return 1
        fi

        # Clean up backup on source (optional; the copy on the target
        # node is kept)
        log_info "Cleaning up backup on source node..."
        ssh_r630_02 "rm -f $backup_file" || log_warn "Failed to clean up backup on source"

        # Start container if it was running before
        if [ "$was_running" = true ]; then
            log_info "Starting container $vmid on $TARGET_NODE..."
            sleep 5
            ssh_r630_01 "pct start $vmid" || log_warn "Failed to start container $vmid (may need manual start)"
        fi

        log_success "Container $vmid migration completed successfully"
        echo ""
        return 0
    else
        log_error "Migration failed for container $vmid"
        return 1
    fi
}
|
|
|
|
# Main function: verify the target storage, list containers in the range
# [VMID_START, VMID_END] on the source node, ask the operator to
# confirm, migrate each container, and print a summary.
main() {
    log_header "Migration: r630-02 to r630-01 (VMIDs $VMID_START-$VMID_END)"
    echo ""

    # Check prerequisites
    log_info "Checking prerequisites..."

    if ! check_target_storage; then
        log_error "Prerequisites check failed"
        exit 1
    fi

    # Get list of containers to migrate.
    # Declaration and assignment are split so a failed listing is not
    # masked by 'local' — previously, with the ssh helper merging stderr
    # into stdout, SSH error text could silently be treated as a list of
    # VMIDs.
    log_info "Finding containers in range $VMID_START-$VMID_END on $SOURCE_NODE..."
    local containers
    containers=$(ssh_r630_02 "pct list | awk 'NR>1 && \$1 >= $VMID_START && \$1 <= $VMID_END {print \$1}' | sort -n") || {
        log_error "Failed to list containers on $SOURCE_NODE"
        exit 1
    }

    if [ -z "$containers" ]; then
        log_warn "No containers found in range $VMID_START-$VMID_END on $SOURCE_NODE"
        exit 0
    fi

    local container_count
    container_count=$(echo "$containers" | wc -l)
    log_info "Found $container_count containers to migrate:"
    # This while loop runs in a pipeline subshell — fine here, since it
    # only prints and sets no variables used afterwards. '-r' keeps any
    # backslashes in the input literal.
    echo "$containers" | while read -r vmid; do
        local name=$(get_container_name "$vmid")
        local status=$(get_container_status "$vmid")
        echo "  - $vmid: $name ($status)"
    done
    echo ""

    # Confirm migration interactively before doing anything destructive
    log_warn "This will migrate containers from $SOURCE_NODE to $TARGET_NODE"
    log_warn "Containers on $TARGET_NODE with same VMIDs will be replaced"
    log_info "Press Enter to continue or Ctrl+C to cancel..."
    read -r

    # Migrate each container, continuing past individual failures
    local success=0
    local failed=0

    for vmid in $containers; do
        if migrate_container "$vmid"; then
            success=$((success + 1))
        else
            failed=$((failed + 1))
            log_warn "Migration failed for container $vmid, continuing with next..."
        fi
    done

    # Summary
    echo ""
    log_header "Migration Summary"
    log_success "Successfully migrated: $success"
    if [ $failed -gt 0 ]; then
        log_error "Failed migrations: $failed"
    fi
    log_info "Total containers processed: $((success + failed))"
}
|
|
|
|
# Entry point — forward all command-line arguments to main.
main "$@"