#!/usr/bin/env bash
set -euo pipefail

# Simplified script to create RAID 10 with 4 available disks (sde-sdh)
# This is the safer approach - creates RAID, migrates data, keeps 4-disk RAID
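#
# Usage sketch (the filename below is illustrative, not from the original):
#   ./create-raid10-r630-01.sh
# Run from an admin host that can reach the node; sshpass must be installed locally.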

TARGET_NODE="r630-01"
TARGET_NODE_IP="192.168.11.11"
TARGET_NODE_PASS="password"
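# NOTE: the plaintext password above is kept from the original runbook; a safer
# pattern (an assumption, not in the original) is to take it from the environment:
#   TARGET_NODE_PASS="${TARGET_NODE_PASS:-password}"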

# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }

ssh_r630_01() {
    sshpass -p "$TARGET_NODE_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 root@"$TARGET_NODE_IP" "$@" 2>&1
}
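
# Note (not in the original): sshpass with a stored password keeps the script
# self-contained; with SSH keys deployed, the wrapper above would reduce to:
#   ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 root@"$TARGET_NODE_IP" "$@"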

install_mdadm() {
    log_info "Installing mdadm..."
    if ssh_r630_01 "which mdadm >/dev/null 2>&1"; then
        log_success "mdadm already installed"
        return 0
    fi

    ssh_r630_01 "apt-get update && apt-get install -y mdadm" || {
        log_error "Failed to install mdadm"
        return 1
    }
    log_success "mdadm installed"
    return 0
}
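
# Assumption worth noting: on Debian-based nodes the mdadm package install can
# trigger debconf prompts; prefixing with DEBIAN_FRONTEND=noninteractive, e.g.
#   ssh_r630_01 "DEBIAN_FRONTEND=noninteractive apt-get install -y mdadm"
# avoids a possible hang on a non-interactive SSH session.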

check_disks() {
    log_info "Checking disk availability..."

    local available_disks=("sde" "sdf" "sdg" "sdh")
    local missing_disks=()

    for disk in "${available_disks[@]}"; do
        if ! ssh_r630_01 "test -b /dev/$disk"; then
            missing_disks+=("$disk")
        fi
    done

    if [ ${#missing_disks[@]} -gt 0 ]; then
        log_error "Missing disks: ${missing_disks[*]}"
        return 1
    fi

    # Check if disks are in use
    for disk in "${available_disks[@]}"; do
        if ssh_r630_01 "pvs 2>/dev/null | grep -q /dev/$disk || mount | grep -q /dev/$disk"; then
            log_error "Disk /dev/$disk is already in use"
            return 1
        fi
    done

    log_success "All disks are available"
    return 0
}
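
# Optional extra check (assumes wipefs from util-linux is present on the node):
#   ssh_r630_01 "wipefs /dev/sde /dev/sdf /dev/sdg /dev/sdh"
# lists leftover filesystem/RAID signatures that would make mdadm --create
# ask for confirmation before reusing the disks.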

create_raid10() {
    log_info "Creating RAID 10 with 4 disks (sde, sdf, sdg, sdh)..."

    ssh_r630_01 "mdadm --create /dev/md0 --level=10 --raid-devices=4 /dev/sde /dev/sdf /dev/sdg /dev/sdh" || {
        log_error "Failed to create RAID 10"
        return 1
    }

    log_success "RAID 10 created on /dev/md0"

    # Wait for sync
    log_info "Waiting for RAID array to synchronize (this may take 30-60 minutes)..."
    local max_wait=7200 # 2 hours max
    local waited=0

    while [ $waited -lt $max_wait ]; do
        # Scan all of /proc/mdstat: during the initial sync the [UUUU] member
        # line is already present, so the resync/recovery check must come first.
        local mdstat
        mdstat=$(ssh_r630_01 "cat /proc/mdstat 2>/dev/null" || true)

        if echo "$mdstat" | grep -q "recovery\|resync"; then
            local progress
            progress=$(echo "$mdstat" | grep -oP '\d+\.\d+%' | head -1 || echo "in progress")
            if [ $((waited % 300)) -eq 0 ]; then # Log every 5 minutes
                log_info "RAID sync progress: $progress (elapsed: $((waited/60)) minutes)"
            fi
            sleep 30
            waited=$((waited + 30))
        elif echo "$mdstat" | grep -q "\[UUUU\]"; then
            # No resync/recovery line and all four members up: sync is done.
            log_success "RAID array is fully synchronized"
            break
        else
            sleep 10
            waited=$((waited + 10))
        fi
    done

    if [ $waited -ge $max_wait ]; then
        log_warn "RAID sync may still be in progress. Check manually: cat /proc/mdstat"
    fi

    # Save configuration
    log_info "Saving RAID configuration..."
    ssh_r630_01 "mdadm --detail --scan >> /etc/mdadm/mdadm.conf" || {
        log_warn "Failed to save to mdadm.conf, but RAID is created"
    }

    ssh_r630_01 "update-initramfs -u" || true

    return 0
}
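
# Simpler alternative to the polling loop above (assuming blocking the script
# is acceptable): ssh_r630_01 "mdadm --wait /dev/md0" returns only once any
# resync/recovery on the array has finished.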

add_to_lvm() {
    log_info "Adding RAID to pve volume group..."

    # Create physical volume
    ssh_r630_01 "pvcreate /dev/md0" || {
        log_error "Failed to create PV on RAID"
        return 1
    }

    # Extend VG
    ssh_r630_01 "vgextend pve /dev/md0" || {
        log_error "Failed to extend pve VG"
        return 1
    }

    log_success "RAID added to pve VG"

    # Show status
    log_info "Updated VG status:"
    ssh_r630_01 "vgs pve"
    ssh_r630_01 "pvs | grep pve"

    return 0
}
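
# Sanity check after vgextend: the VFree column of `vgs pve` should grow by
# roughly the usable md0 capacity (~466GB for this 4-disk RAID 10).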

migrate_data() {
    log_info "Migrating data from sdc and sdd to RAID..."
    log_warn "This will take 1-3 hours depending on data size (~408GB)"

    # Migrate from sdc
    log_info "Migrating data from sdc to RAID..."
    ssh_r630_01 "pvmove /dev/sdc /dev/md0" || {
        log_error "Failed to migrate data from sdc"
        return 1
    }
    log_success "Data migrated from sdc"

    # Migrate from sdd
    log_info "Migrating data from sdd to RAID..."
    ssh_r630_01 "pvmove /dev/sdd /dev/md0" || {
        log_error "Failed to migrate data from sdd"
        return 1
    }
    log_success "Data migrated from sdd"

    return 0
}
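
# Note: pvmove records its progress in LVM metadata, so an interrupted move
# can be resumed by running `pvmove` on the node with no arguments.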

remove_old_pvs() {
    log_info "Removing sdc and sdd from pve VG..."

    # Remove from VG
    ssh_r630_01 "vgreduce pve /dev/sdc /dev/sdd" || {
        log_error "Failed to remove PVs from VG"
        return 1
    }

    # Remove PV labels
    ssh_r630_01 "pvremove /dev/sdc /dev/sdd" || {
        log_error "Failed to remove PV labels"
        return 1
    }

    log_success "sdc and sdd removed from pve VG"

    # Verify
    log_info "Final VG status:"
    ssh_r630_01 "vgs pve"
    ssh_r630_01 "pvs | grep pve"

    return 0
}

show_status() {
    log_info "=== RAID Status ==="
    ssh_r630_01 "cat /proc/mdstat"
    echo ""
    ssh_r630_01 "mdadm --detail /dev/md0"
    echo ""
    log_info "=== LVM Status ==="
    ssh_r630_01 "vgs pve"
    ssh_r630_01 "pvs | grep pve"
}

main() {
    echo ""
    log_info "=== RAID 10 Setup for R630-01 ==="
    log_info "Creating RAID 10 with 4 disks: sde, sdf, sdg, sdh"
    log_info "Then migrating data from sdc/sdd to RAID"
    echo ""

    # Install mdadm
    if ! install_mdadm; then
        exit 1
    fi

    # Check disks
    if ! check_disks; then
        exit 1
    fi

    # Show current status
    log_info "Current storage status:"
    ssh_r630_01 "vgs pve"
    ssh_r630_01 "pvs | grep pve"
    echo ""

    log_warn "WARNING: This will:"
    log_warn "1. Create RAID 10 with 4 disks (sde-sdh)"
    log_warn "2. Migrate all data from sdc/sdd to RAID (~408GB)"
    log_warn "3. Remove sdc/sdd from pve VG"
    log_warn ""
    log_warn "Estimated time: 2-4 hours"
    log_warn "Some containers may need downtime during migration"
    echo ""
    read -p "Continue? (yes/no): " confirm
    if [ "$confirm" != "yes" ]; then
        log_info "Operation cancelled"
        exit 0
    fi

    # Create RAID
    if ! create_raid10; then
        exit 1
    fi

    # Add to LVM
    if ! add_to_lvm; then
        exit 1
    fi

    # Migrate data
    if ! migrate_data; then
        log_error "Data migration failed"
        log_warn "RAID is created and added to VG, but migration is incomplete"
        exit 1
    fi

    # Remove old PVs
    if ! remove_old_pvs; then
        log_error "Failed to remove old PVs"
        exit 1
    fi

    # Show final status
    show_status

    log_success "RAID 10 setup completed successfully!"
    log_info ""
    log_info "RAID Device: /dev/md0"
    log_info "Capacity: ~466GB (RAID 10 with 4 disks)"
    log_info "Performance: up to 4x read and 2x write throughput vs a single disk"
    log_info "Redundancy: survives 1 disk failure guaranteed, 2 if in different mirror pairs"
    log_info ""
    log_info "sdc and sdd are now free and can be used for other purposes"
    log_info "or added to the RAID later if needed"
}

main "$@"