chore: sync submodule state (parent ref update)

Made-with: Cursor
This commit is contained in:
defiQUG
2026-03-02 12:14:14 -08:00
parent b6a776e5d7
commit 25c96e210a
316 changed files with 29779 additions and 677 deletions

View File

@@ -0,0 +1,408 @@
#!/bin/bash
# Configure Cloudflare DNS for ChainID 138 MetaMask Integration
# This script creates DNS configuration files and instructions
#
# Outputs (written under $PROJECT_ROOT/cloudflare-dns-config/):
#   - dns-records.json          declarative list of DNS records to create
#   - configure-dns-api.sh      helper that creates the records via the Cloudflare v4 API
#   - MANUAL_CONFIGURATION.md   step-by-step guide for the Cloudflare dashboard
#
# The script itself performs no network calls; it only generates files.
# Abort on the first failing command.
set -e
# Absolute directory containing this script, resolved independent of the caller's CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Project root is assumed to be one level above the scripts directory.
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
# ANSI escape sequences for colored log output; NC resets to the default color.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: each takes one message string ($1) and prints it with a
# colored level tag. `echo -e` is required so the escape codes are interpreted.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Cloudflare DNS Configuration"
log_info "========================================="
log_info ""
# Create DNS configuration directory
DNS_DIR="$PROJECT_ROOT/cloudflare-dns-config"
mkdir -p "$DNS_DIR"
# Create DNS records configuration
# The quoted 'EOF' delimiter makes the heredoc literal: nothing inside is
# expanded by the shell, so the <server-ip-address> placeholders are written
# verbatim for the operator to fill in later.
log_info "Creating DNS records configuration..."
cat > "$DNS_DIR/dns-records.json" << 'EOF'
{
"records": [
{
"type": "A",
"name": "rpc",
"content": "<server-ip-address>",
"ttl": 300,
"proxied": true,
"comment": "Primary RPC endpoint for ChainID 138"
},
{
"type": "A",
"name": "rpc2",
"content": "<server-ip-address>",
"ttl": 300,
"proxied": true,
"comment": "Secondary RPC endpoint for ChainID 138"
},
{
"type": "A",
"name": "explorer",
"content": "<server-ip-address>",
"ttl": 300,
"proxied": true,
"comment": "Blockscout explorer for ChainID 138"
},
{
"type": "CNAME",
"name": "rpc-core",
"content": "rpc.d-bis.org",
"ttl": 300,
"proxied": true,
"comment": "RPC core endpoint alias"
}
]
}
EOF
log_success "Created: $DNS_DIR/dns-records.json"
# Create Cloudflare API script
# NOTE(review): the generated script's curl calls do not pass --fail, so an
# HTTP-level error from the Cloudflare API will not abort it despite its
# `set -e` — confirm whether that is intentional.
log_info "Creating Cloudflare API configuration script..."
cat > "$DNS_DIR/configure-dns-api.sh" << 'EOF'
#!/bin/bash
# Configure Cloudflare DNS via API
# Requires: CLOUDFLARE_API_TOKEN and CLOUDFLARE_ZONE_ID
set -e
ZONE_ID="${CLOUDFLARE_ZONE_ID}"
API_TOKEN="${CLOUDFLARE_API_TOKEN}"
DOMAIN="d-bis.org"
if [ -z "$ZONE_ID" ] || [ -z "$API_TOKEN" ]; then
echo "Error: CLOUDFLARE_ZONE_ID and CLOUDFLARE_API_TOKEN must be set"
exit 1
fi
# Function to create DNS record
create_record() {
local type=$1
local name=$2
local content=$3
local proxied=${4:-true}
curl -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
-H "Authorization: Bearer $API_TOKEN" \
-H "Content-Type: application/json" \
--data "{
\"type\": \"$type\",
\"name\": \"$name\",
\"content\": \"$content\",
\"ttl\": 300,
\"proxied\": $proxied
}"
}
# Create RPC endpoint
echo "Creating rpc.d-bis.org..."
create_record "A" "rpc" "<server-ip>" true
# Create secondary RPC endpoint
echo "Creating rpc2.d-bis.org..."
create_record "A" "rpc2" "<server-ip>" true
# Create explorer endpoint
echo "Creating explorer.d-bis.org..."
create_record "A" "explorer" "<server-ip>" true
echo "DNS records created successfully!"
EOF
chmod +x "$DNS_DIR/configure-dns-api.sh"
log_success "Created: $DNS_DIR/configure-dns-api.sh"
# Create manual configuration guide
# (literal heredoc again: backticks and $-signs in the markdown are not expanded)
cat > "$DNS_DIR/MANUAL_CONFIGURATION.md" << 'EOF'
# Cloudflare DNS Manual Configuration Guide
## Prerequisites
1. Cloudflare account
2. Domain `d-bis.org` added to Cloudflare
3. Access to Cloudflare dashboard
## DNS Records to Create
### 1. Primary RPC Endpoint (rpc.d-bis.org)
**Type**: A
**Name**: `rpc`
**IPv4 address**: `<your-server-ip>`
**Proxy status**: Proxied (orange cloud)
**TTL**: Auto
**Purpose**: Primary RPC endpoint for ChainID 138
---
### 2. Secondary RPC Endpoint (rpc2.d-bis.org)
**Type**: A
**Name**: `rpc2`
**IPv4 address**: `<your-server-ip>`
**Proxy status**: Proxied (orange cloud)
**TTL**: Auto
**Purpose**: Secondary RPC endpoint for redundancy
---
### 3. Explorer Endpoint (explorer.d-bis.org)
**Type**: A
**Name**: `explorer`
**IPv4 address**: `<your-server-ip>`
**Proxy status**: Proxied (orange cloud)
**TTL**: Auto
**Purpose**: Blockscout explorer for ChainID 138
---
### 4. RPC Core Alias (rpc-core.d-bis.org)
**Type**: CNAME
**Name**: `rpc-core`
**Target**: `rpc.d-bis.org`
**Proxy status**: Proxied (orange cloud)
**TTL**: Auto
**Purpose**: Alias for primary RPC endpoint
---
## Configuration Steps
### Step 1: Access Cloudflare Dashboard
1. Go to https://dash.cloudflare.com
2. Select your account
3. Select domain `d-bis.org`
### Step 2: Navigate to DNS
1. Click "DNS" in the left sidebar
2. Click "Records"
3. Click "Add record"
### Step 3: Create Records
For each record above:
1. Select record type
2. Enter name
3. Enter content (IP address or target)
4. Enable proxy (orange cloud)
5. Click "Save"
### Step 4: Verify Records
1. Check all records are created
2. Verify proxy status is enabled
3. Verify TTL is set correctly
4. Test DNS resolution
---
## DNS Verification
### Test DNS Resolution
```bash
# Test primary RPC
dig rpc.d-bis.org +short
# Test secondary RPC
dig rpc2.d-bis.org +short
# Test explorer
dig explorer.d-bis.org +short
# Test RPC core alias
dig rpc-core.d-bis.org +short
```
### Expected Results
All should resolve to your server IP address (or Cloudflare proxy IPs if proxied).
---
## SSL/TLS Configuration
### Automatic SSL
Cloudflare provides automatic SSL certificates:
1. Go to SSL/TLS settings
2. Set encryption mode to "Full" or "Full (strict)"
3. Enable "Always Use HTTPS"
4. SSL certificates are automatically provisioned
### SSL Verification
```bash
# Test SSL certificate
openssl s_client -connect rpc.d-bis.org:443 -servername rpc.d-bis.org
# Check certificate validity
echo | openssl s_client -connect rpc.d-bis.org:443 2>/dev/null | openssl x509 -noout -dates
```
---
## Proxy Configuration
### Benefits of Proxying
- DDoS protection
- CDN caching
- SSL termination
- IP hiding
### Considerations
- Proxy adds latency (~10-50ms)
- Some features may require direct IP access
- RPC endpoints may need direct access
### Configuration
For RPC endpoints, you may want to:
1. Start with proxy enabled
2. Monitor performance
3. Disable proxy if needed for low latency
---
## Page Rules
### Recommended Page Rules
1. **Cache Level**: Standard
2. **Browser Cache TTL**: 4 hours
3. **Edge Cache TTL**: 2 hours
### Create Page Rule
1. Go to Rules → Page Rules
2. Click "Create Page Rule"
3. URL pattern: `rpc.d-bis.org/*`
4. Settings:
- Cache Level: Standard
- Browser Cache TTL: 4 hours
- Edge Cache TTL: 2 hours
---
## Security Settings
### Recommended Settings
1. **Security Level**: Medium
2. **Challenge Passage**: 30 minutes
3. **Browser Integrity Check**: On
4. **Privacy Pass Support**: On
### Rate Limiting
Create rate limiting rules:
- Rate: 10 requests per second per IP
- Burst: 20 requests
- Action: Challenge or Block
---
## Monitoring
### Cloudflare Analytics
1. Monitor DNS queries
2. Monitor traffic
3. Monitor errors
4. Monitor performance
### Alerts
Set up alerts for:
- High error rates
- DDoS attacks
- SSL certificate expiration
- DNS resolution issues
---
## Troubleshooting
### DNS Not Resolving
1. Check DNS records are correct
2. Check proxy status
3. Wait for DNS propagation (up to 48 hours)
4. Clear DNS cache
### SSL Certificate Issues
1. Check SSL/TLS mode is "Full"
2. Verify origin server has valid certificate
3. Check certificate expiration
4. Review SSL errors in Cloudflare dashboard
### Performance Issues
1. Check proxy status
2. Review Cloudflare analytics
3. Check origin server performance
4. Consider disabling proxy for RPC endpoints
---
## Next Steps
After DNS configuration:
1. ✅ Verify DNS resolution
2. ✅ Configure SSL certificates
3. ✅ Test RPC endpoints
4. ✅ Test explorer
5. ✅ Update MetaMask network config
6. ✅ Update token lists
---
**Last Updated**: 2026-01-26
EOF
log_success "Created: $DNS_DIR/MANUAL_CONFIGURATION.md"
# Final summary of generated artifacts and follow-up actions for the operator.
log_info ""
log_info "========================================="
log_info "DNS Configuration Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $DNS_DIR"
log_info " - dns-records.json (DNS records config)"
log_info " - configure-dns-api.sh (API script)"
log_info " - MANUAL_CONFIGURATION.md (manual guide)"
log_info ""
log_info "Next steps:"
log_info "1. Review DNS configuration"
log_info "2. Configure Cloudflare DNS"
log_info "3. Verify DNS resolution"
log_info "4. Configure SSL certificates"
log_info ""

View File

@@ -0,0 +1,263 @@
#!/bin/bash
# Configure MetaMask Embedded Wallets for ChainID 138
# This script generates configuration for the MetaMask Embedded Wallets dashboard
#
# Outputs (written under $PROJECT_ROOT/embedded-wallets-config/):
#   - network-config.json          ChainID 138 network parameters
#   - sdk-config.ts                Web3Auth/Embedded-Wallets SDK config snippet
#   - DASHBOARD_CONFIGURATION.md   dashboard setup walkthrough
#
# The script itself performs no network calls; it only generates files.
# Abort on the first failing command.
set -e
# Absolute directory containing this script, resolved independent of the caller's CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Project root is assumed to be one level above the scripts directory.
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
# ANSI escape sequences for colored log output; NC resets to the default color.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: each takes one message string ($1) and prints it with a
# colored level tag. `echo -e` is required so the escape codes are interpreted.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "MetaMask Embedded Wallets Configuration"
log_info "========================================="
log_info ""
# Create configuration directory
CONFIG_DIR="$PROJECT_ROOT/embedded-wallets-config"
mkdir -p "$CONFIG_DIR"
# Create network configuration
# Quoted 'EOF' delimiter: heredoc content is written literally, no shell expansion.
log_info "Creating network configuration..."
cat > "$CONFIG_DIR/network-config.json" << 'EOF'
{
"chainId": 138,
"chainIdHex": "0x8a",
"chainName": "DeFi Oracle Meta Mainnet",
"currencySymbol": "ETH",
"currencyName": "Ether",
"decimals": 18,
"blockExplorerUrl": "https://explorer.d-bis.org",
"namespace": "eip155",
"rpcUrls": [
"https://rpc.d-bis.org",
"https://rpc2.d-bis.org"
],
"isTestnet": false,
"isMainnet": true
}
EOF
log_success "Created: $CONFIG_DIR/network-config.json"
# Create SDK configuration
# NOTE(review): the generated TypeScript imports '@web3auth/base' and reads
# process.env.WEB3AUTH_CLIENT_ID with a 'YOUR_CLIENT_ID' placeholder fallback —
# the consuming dApp must provide both.
log_info "Creating SDK configuration..."
cat > "$CONFIG_DIR/sdk-config.ts" << 'EOF'
// MetaMask Embedded Wallets SDK Configuration for ChainID 138
import { CHAIN_NAMESPACES } from '@web3auth/base';
export const CHAIN_138_CONFIG = {
chainNamespace: CHAIN_NAMESPACES.EIP155,
chainId: '0x8a', // 138 in hex
rpcTarget: 'https://rpc.d-bis.org',
displayName: 'DeFi Oracle Meta Mainnet',
blockExplorerUrl: 'https://explorer.d-bis.org',
ticker: 'ETH',
tickerName: 'Ether',
logo: 'https://explorer.d-bis.org/images/logo.png',
};
// Web3Auth Configuration
export const WEB3AUTH_CONFIG = {
clientId: process.env.WEB3AUTH_CLIENT_ID || 'YOUR_CLIENT_ID',
chainConfig: CHAIN_138_CONFIG,
web3AuthNetwork: 'mainnet', // or 'testnet' for development
uiConfig: {
appName: 'DeFi Oracle Meta Mainnet',
mode: 'auto', // 'light', 'dark', or 'auto'
primaryColor: '#667eea',
loginGridCol: 3,
primaryButtonColor: '#667eea',
},
};
EOF
log_success "Created: $CONFIG_DIR/sdk-config.ts"
# Create dashboard configuration guide
# (literal heredoc again: backticks and $-signs in the markdown are not expanded)
cat > "$CONFIG_DIR/DASHBOARD_CONFIGURATION.md" << 'EOF'
# MetaMask Embedded Wallets Dashboard Configuration
## Step-by-Step Configuration Guide
### 1. Create Project
1. Go to [MetaMask Developer Dashboard](https://dashboard.metamask.io)
2. Click "Create New Project"
3. Enter project name: "ChainID 138 Integration"
4. Select project type: "Embedded Wallets"
5. Click "Create"
### 2. Configure ChainID 138 Network
Navigate to **Configuration → Chains and Networks**:
1. Click "Add Custom Network"
2. Enter the following:
**Chain ID**: `138`
**Currency Symbol**: `ETH`
**Block Explorer URL**: `https://explorer.d-bis.org`
**Namespace**: `eip155`
**RPC URL**: `https://rpc.d-bis.org`
3. Click "Save"
4. Toggle network to "Enabled"
5. Mark as "Mainnet" (not testnet)
### 3. Configure Branding
Navigate to **Configuration → Customization → Branding**:
1. **Upload Logo**:
- Upload ChainID 138 network logo
- Recommended: 512x512px PNG
- Enable "Use logo as loader"
2. **Application Name**: "DeFi Oracle Meta Mainnet"
3. **Terms and Privacy**:
- Add Terms of Service URL (if available)
- Add Privacy Policy URL (if available)
4. **Default Language**: English
### 4. Configure Theme
Navigate to **Configuration → Customization → Theme and Colors**:
1. **Select Mode**: Auto (adapts to user preference)
2. **Primary Color**: `#667eea` (or your brand color)
3. **On Primary Color**: `#ffffff` (white text on primary)
### 5. Configure Login Modal
Navigate to **Configuration → Customization → Login Modal**:
1. **Design**:
- **Modal Appearance**: Modal Widget (pop-up)
- **Logo Alignment**: Center
- **Border Radius**: Medium
- **Border Radius Type**: Rounded
2. **Authentication Order**:
- Arrange login methods (drag and drop)
- Recommended order:
1. External Wallets (MetaMask, WalletConnect)
2. Social Logins (Google, Twitter)
3. Email/Phone
3. **External Wallets**:
- Enable "Show installed external wallets"
- Set number of wallets to display: 3-5
### 6. Add Token List
Navigate to **Configuration → Chains and Networks → ChainID 138**:
1. **Token List URL**: Add your hosted token list URL
- Example: `https://your-domain.com/token-list.json`
- Or: `https://ipfs.io/ipfs/YOUR_HASH`
2. **Verify Tokens**: Check that tokens appear correctly
### 7. Save and Publish
1. Review all configurations
2. Click "Save & Publish"
3. Changes take effect immediately
### 8. Get Client ID
1. Navigate to **Project Settings**
2. Copy your **Client ID**
3. Use in SDK configuration
---
## Configuration Values Summary
| Setting | Value |
|---------|-------|
| Chain ID | 138 (0x8a) |
| Chain Name | DeFi Oracle Meta Mainnet |
| Currency Symbol | ETH |
| RPC URL | https://rpc.d-bis.org |
| Block Explorer | https://explorer.d-bis.org |
| Namespace | eip155 |
| Network Type | Mainnet |
---
## Testing
After configuration:
1. **Test Network Addition**: Verify ChainID 138 appears in network list
2. **Test Connection**: Connect wallet using embedded wallet
3. **Test Token Display**: Verify tokens appear correctly
4. **Test Transactions**: Send test transaction
---
## Troubleshooting
### Network Not Appearing
- Verify Chain ID is correct (138)
- Check RPC URL is accessible
- Ensure network is enabled in dashboard
### Tokens Not Displaying
- Verify token list URL is accessible
- Check token list format is correct
- Ensure tokens have correct ChainID (138)
### Connection Issues
- Verify Client ID is correct
- Check SDK configuration matches dashboard
- Review browser console for errors
---
**Last Updated**: 2026-01-26
EOF
log_success "Created: $CONFIG_DIR/DASHBOARD_CONFIGURATION.md"
# Final summary of generated artifacts and follow-up actions for the operator.
log_info ""
log_info "========================================="
log_info "Embedded Wallets Config Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $CONFIG_DIR"
log_info " - network-config.json (network config)"
log_info " - sdk-config.ts (SDK configuration)"
log_info " - DASHBOARD_CONFIGURATION.md (setup guide)"
log_info ""
log_info "Next steps:"
log_info "1. Review DASHBOARD_CONFIGURATION.md"
log_info "2. Configure dashboard with provided values"
log_info "3. Get Client ID from dashboard"
log_info "4. Integrate SDK in your dApp"
log_info ""

366
scripts/deploy-azure-gateway.sh Executable file
View File

@@ -0,0 +1,366 @@
#!/bin/bash
# Deploy Azure Application Gateway for ChainID 138 MetaMask Integration
# This script creates Terraform configuration and deployment instructions
#
# Outputs (written under $PROJECT_ROOT/azure-gateway-deployment/):
#   - main.tf              Terraform config for the Application Gateway stack
#   - DEPLOYMENT_GUIDE.md  deployment walkthrough
#
# The script itself performs no Azure calls; it only generates files.
#
# Fixes over the previous revision of the generated Terraform:
#   1. request_routing_rule now sets `priority`, which the azurerm provider
#      (~> 3.0, as pinned below) requires for v2-SKU gateways.
#   2. request_routing_rule now references the "cors-headers" rewrite rule set;
#      previously the set was declared but never attached, so the CORS headers
#      were never actually added to responses.
# Abort on the first failing command.
set -e
# Absolute directory containing this script, resolved independent of the caller's CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Project root is assumed to be one level above the scripts directory.
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
# ANSI escape sequences for colored log output; NC resets to the default color.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: each takes one message string ($1) and prints it with a
# colored level tag. `echo -e` is required so the escape codes are interpreted.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Azure Application Gateway Deployment"
log_info "========================================="
log_info ""
# Create deployment directory
GATEWAY_DIR="$PROJECT_ROOT/azure-gateway-deployment"
mkdir -p "$GATEWAY_DIR"
# Create Terraform configuration
# Quoted 'EOF' delimiter: heredoc content is written literally, no shell
# expansion — Terraform's own ${} interpolation is therefore safe inside.
log_info "Creating Terraform configuration..."
cat > "$GATEWAY_DIR/main.tf" << 'EOF'
# Azure Application Gateway for ChainID 138 MetaMask Integration
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3.0"
}
}
}
provider "azurerm" {
features {}
}
# Resource Group
resource "azurerm_resource_group" "main" {
name = "rg-chain138-metamask"
location = "East US"
}
# Public IP
resource "azurerm_public_ip" "gateway" {
name = "pip-chain138-gateway"
resource_group_name = azurerm_resource_group.main.name
location = azurerm_resource_group.main.location
allocation_method = "Static"
sku = "Standard"
}
# Application Gateway
resource "azurerm_application_gateway" "main" {
name = "agw-chain138"
resource_group_name = azurerm_resource_group.main.name
location = azurerm_resource_group.main.location
sku {
name = "Standard_v2"
tier = "Standard_v2"
capacity = 2
}
gateway_ip_configuration {
name = "gateway-ip-config"
subnet_id = azurerm_subnet.gateway.id
}
frontend_port {
name = "https"
port = 443
}
frontend_port {
name = "http"
port = 80
}
frontend_ip_configuration {
name = "public-ip"
public_ip_address_id = azurerm_public_ip.gateway.id
}
# Backend Pool for RPC
backend_address_pool {
name = "rpc-backend-pool"
ip_addresses = ["192.168.11.211"]
}
# Backend Pool for Explorer
backend_address_pool {
name = "explorer-backend-pool"
ip_addresses = ["<explorer-ip>"]
}
# HTTP Settings with CORS
backend_http_settings {
name = "rpc-http-settings"
cookie_based_affinity = "Disabled"
port = 8545
protocol = "Http"
request_timeout = 60
}
backend_http_settings {
name = "explorer-http-settings"
cookie_based_affinity = "Disabled"
port = 4000
protocol = "Http"
request_timeout = 60
}
# HTTP Listener for RPC
http_listener {
name = "rpc-https-listener"
frontend_ip_configuration_name = "public-ip"
frontend_port_name = "https"
protocol = "Https"
ssl_certificate_name = "ssl-certificate"
}
# Request Routing Rule for RPC
request_routing_rule {
name = "rpc-https-rule"
rule_type = "Basic"
# Required by azurerm >= 3.x for v2 SKUs; lower number = higher precedence.
priority = 100
http_listener_name = "rpc-https-listener"
backend_address_pool_name = "rpc-backend-pool"
backend_http_settings_name = "rpc-http-settings"
# Attach the CORS rewrite rule set defined below; without this reference
# the rule set exists but is never applied to any traffic.
rewrite_rule_set_name = "cors-headers"
}
# Rewrite Rule Set for CORS
rewrite_rule_set {
name = "cors-headers"
rewrite_rule {
name = "add-cors-headers"
rule_sequence = 100
response_header_configuration {
header_name = "Access-Control-Allow-Origin"
header_value = "*"
}
response_header_configuration {
header_name = "Access-Control-Allow-Methods"
header_value = "GET, POST, OPTIONS"
}
response_header_configuration {
header_name = "Access-Control-Allow-Headers"
header_value = "Content-Type, Authorization"
}
response_header_configuration {
header_name = "Access-Control-Max-Age"
header_value = "3600"
}
}
}
# SSL Certificate (use Key Vault or upload)
ssl_certificate {
name = "ssl-certificate"
data = filebase64("ssl-certificate.pfx")
password = var.ssl_certificate_password
}
}
# Virtual Network
resource "azurerm_virtual_network" "main" {
name = "vnet-chain138"
address_space = ["10.0.0.0/16"]
location = azurerm_resource_group.main.location
resource_group_name = azurerm_resource_group.main.name
}
# Subnet for Gateway
resource "azurerm_subnet" "gateway" {
name = "subnet-gateway"
resource_group_name = azurerm_resource_group.main.name
virtual_network_name = azurerm_virtual_network.main.name
address_prefixes = ["10.0.1.0/24"]
}
variable "ssl_certificate_password" {
description = "Password for SSL certificate"
type = string
sensitive = true
}
EOF
log_success "Created: $GATEWAY_DIR/main.tf"
# Create deployment guide
# (literal heredoc again: backticks and $-signs in the markdown are not expanded)
cat > "$GATEWAY_DIR/DEPLOYMENT_GUIDE.md" << 'EOF'
# Azure Application Gateway Deployment Guide
## Overview
Azure Application Gateway provides load balancing, SSL termination, and CORS support for ChainID 138 MetaMask integration endpoints.
## Prerequisites
1. Azure subscription
2. Azure CLI installed
3. Terraform installed
4. SSL certificate (PFX format)
5. Resource group permissions
## Deployment Steps
### Step 1: Azure Login
```bash
az login
az account set --subscription "<subscription-id>"
```
### Step 2: Configure Terraform
1. **Set Variables**:
```bash
export TF_VAR_ssl_certificate_password="your-certificate-password"
```
2. **Initialize Terraform**:
```bash
terraform init
```
3. **Plan Deployment**:
```bash
terraform plan
```
4. **Apply Configuration**:
```bash
terraform apply
```
### Step 3: Configure DNS
1. Get Public IP from Terraform output
2. Create DNS A records pointing to Public IP:
- `rpc.d-bis.org` → Public IP
- `rpc2.d-bis.org` → Public IP
- `explorer.d-bis.org` → Public IP
### Step 4: Configure SSL Certificate
1. **Upload Certificate**:
- Convert certificate to PFX format
- Upload to Azure Key Vault (recommended)
- Or include in Terraform configuration
2. **Key Vault Integration** (Recommended):
```hcl
data "azurerm_key_vault_certificate" "ssl" {
name = "ssl-certificate"
key_vault_id = azurerm_key_vault.main.id
}
```
### Step 5: Verify Deployment
```bash
# Test RPC endpoint
curl -X POST https://rpc.d-bis.org \
-H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
# Test CORS headers
curl -I -X OPTIONS https://rpc.d-bis.org \
-H "Origin: https://metamask.io" \
-H "Access-Control-Request-Method: POST"
```
## Configuration Details
### CORS Headers
Application Gateway adds CORS headers via rewrite rules:
- `Access-Control-Allow-Origin: *`
- `Access-Control-Allow-Methods: GET, POST, OPTIONS`
- `Access-Control-Allow-Headers: Content-Type, Authorization`
- `Access-Control-Max-Age: 3600`
### Backend Pools
- **RPC Backend**: Points to `192.168.11.211:8545`
- **Explorer Backend**: Points to Blockscout instance
### SSL/TLS
- TLS 1.2 minimum
- TLS 1.3 enabled
- Strong cipher suites
- HSTS enabled
## Monitoring
### Azure Monitor
1. Set up alerts for:
- High error rates
- High latency
- Backend health issues
2. Monitor metrics:
- Request count
- Response time
- Failed requests
- Backend health
## Troubleshooting
### Gateway Not Responding
1. Check backend pool health
2. Check NSG rules
3. Check backend server status
4. Review gateway logs
### CORS Not Working
1. Verify rewrite rule set is applied
2. Check response headers
3. Test CORS preflight
4. Review gateway configuration
---
**Last Updated**: 2026-01-26
EOF
log_success "Created: $GATEWAY_DIR/DEPLOYMENT_GUIDE.md"
# Final summary of generated artifacts and follow-up actions for the operator.
log_info ""
log_info "========================================="
log_info "Azure Gateway Config Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $GATEWAY_DIR"
log_info " - main.tf (Terraform configuration)"
log_info " - DEPLOYMENT_GUIDE.md (deployment guide)"
log_info ""
log_info "Next steps:"
log_info "1. Configure Azure credentials"
log_info "2. Prepare SSL certificate"
log_info "3. Run terraform apply"
log_info "4. Configure DNS"
log_info "5. Test endpoints"
log_info ""

382
scripts/deploy-blockscout.sh Executable file
View File

@@ -0,0 +1,382 @@
#!/bin/bash
# Deploy Blockscout Explorer for ChainID 138 with MetaMask integration
# This script creates deployment configuration and setup instructions
#
# Outputs (written under $PROJECT_ROOT/blockscout-deployment/):
#   - docker-compose.yml            Docker Compose stack (Postgres + Blockscout)
#   - blockscout-deployment.yaml    Kubernetes namespace/configmap/deployment/service
#   - DEPLOYMENT_CHECKLIST.md       deployment checklist
#
# The script itself performs no deployment; it only generates files.
# Abort on the first failing command.
set -e
# Absolute directory containing this script, resolved independent of the caller's CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Project root is assumed to be one level above the scripts directory.
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
# ANSI escape sequences for colored log output; NC resets to the default color.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: each takes one message string ($1) and prints it with a
# colored level tag. `echo -e` is required so the escape codes are interpreted.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Blockscout Deployment Configuration"
log_info "========================================="
log_info ""
# Create deployment directory
DEPLOY_DIR="$PROJECT_ROOT/blockscout-deployment"
mkdir -p "$DEPLOY_DIR"
# Create Docker Compose configuration
# Quoted 'EOF' delimiter: content is literal, so the ${POSTGRES_PASSWORD:-...}
# and ${SECRET_KEY_BASE:-...} defaults are resolved by docker-compose at run
# time, not by this shell. The SECRET_KEY_BASE fallback is an intentionally
# obvious placeholder that must be replaced in production.
log_info "Creating Docker Compose configuration..."
cat > "$DEPLOY_DIR/docker-compose.yml" << 'EOF'
version: "3.8"
services:
blockscout-db:
image: postgres:15
container_name: blockscout-db
environment:
- POSTGRES_USER=blockscout
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-blockscout}
- POSTGRES_DB=blockscout
volumes:
- blockscout-db-data:/var/lib/postgresql/data
networks:
- blockscout-network
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -U blockscout"]
interval: 10s
timeout: 5s
retries: 5
blockscout:
image: blockscout/blockscout:latest
container_name: blockscout
depends_on:
blockscout-db:
condition: service_healthy
environment:
# Database
- DATABASE_URL=postgresql://blockscout:${POSTGRES_PASSWORD:-blockscout}@blockscout-db:5432/blockscout
# Network
- ETHEREUM_JSONRPC_HTTP_URL=http://192.168.11.211:8545
- ETHEREUM_JSONRPC_WS_URL=ws://192.168.11.211:8546
- ETHEREUM_JSONRPC_TRACE_URL=http://192.168.11.211:8545
# Chain Configuration
- COIN=ETH
- NETWORK=DeFi Oracle Meta Mainnet
- SUBNETWORK=Mainnet
- BLOCK_TRANSFORMER=base
- CHAIN_ID=138
# Features
- SHOW_ADDRESS_MARKETCAP_PERCENTAGE=true
- ENABLE_ACCOUNT_BALANCE_CACHE=true
- ENABLE_EXCHANGE_RATES=true
- EXCHANGE_RATES_COINGECKO_COIN_ID=ethereum
- ENABLE_SOURCIFY_INTEGRATION=true
- SOURCIFY_SERVER_URL=https://sourcify.dev/server
- ENABLE_TXS_STATS=true
- TXS_STATS_DAYS_TO_COMPILE_AT_INIT=1
# MetaMask Portfolio CORS Configuration
- ENABLE_CORS=true
- CORS_ALLOWED_ORIGINS=https://portfolio.metamask.io,https://metamask.io,https://chainlist.org,https://explorer.d-bis.org
- CORS_ALLOWED_METHODS=GET,POST,OPTIONS
- CORS_ALLOWED_HEADERS=Content-Type,Authorization,Accept
- CORS_MAX_AGE=3600
# Token Metadata API
- ENABLE_TOKEN_METADATA_API=true
- TOKEN_METADATA_CACHE_ENABLED=true
- TOKEN_METADATA_CACHE_TTL=3600
# Logo Serving
- ENABLE_TOKEN_LOGO_SERVING=true
- TOKEN_LOGO_BASE_URL=https://explorer.d-bis.org/images/tokens
# API Rate Limiting
- API_RATE_LIMIT_ENABLED=true
- API_RATE_LIMIT_PER_MINUTE=120
# Security
- SECRET_KEY_BASE=${SECRET_KEY_BASE:-change-me-in-production-use-openssl-rand-hex-32}
ports:
- "4000:4000"
networks:
- blockscout-network
restart: unless-stopped
volumes:
- blockscout-logs:/var/log/blockscout
- blockscout-static:/var/www/blockscout/priv/static
volumes:
blockscout-db-data:
blockscout-logs:
blockscout-static:
networks:
blockscout-network:
driver: bridge
EOF
log_success "Created: $DEPLOY_DIR/docker-compose.yml"
# Create Kubernetes deployment
# NOTE(review): the generated Deployment pulls SECRET_KEY_BASE from a
# `blockscout-secrets` Secret that is created manually in the checklist below,
# not by this manifest — apply order matters.
log_info "Creating Kubernetes deployment configuration..."
cat > "$DEPLOY_DIR/blockscout-deployment.yaml" << 'EOF'
apiVersion: v1
kind: Namespace
metadata:
name: blockscout
---
apiVersion: v1
kind: ConfigMap
metadata:
name: blockscout-config
namespace: blockscout
data:
# Database
DATABASE_URL: "postgresql://blockscout:blockscout@blockscout-db:5432/blockscout"
# Network
ETHEREUM_JSONRPC_HTTP_URL: "http://192.168.11.211:8545"
ETHEREUM_JSONRPC_WS_URL: "ws://192.168.11.211:8546"
ETHEREUM_JSONRPC_TRACE_URL: "http://192.168.11.211:8545"
# Chain Configuration
COIN: "ETH"
NETWORK: "DeFi Oracle Meta Mainnet"
SUBNETWORK: "Mainnet"
BLOCK_TRANSFORMER: "base"
CHAIN_ID: "138"
# MetaMask Portfolio CORS
ENABLE_CORS: "true"
CORS_ALLOWED_ORIGINS: "https://portfolio.metamask.io,https://metamask.io,https://chainlist.org,https://explorer.d-bis.org"
CORS_ALLOWED_METHODS: "GET,POST,OPTIONS"
CORS_ALLOWED_HEADERS: "Content-Type,Authorization,Accept"
CORS_MAX_AGE: "3600"
# Token Metadata API
ENABLE_TOKEN_METADATA_API: "true"
TOKEN_METADATA_CACHE_ENABLED: "true"
TOKEN_METADATA_CACHE_TTL: "3600"
# Logo Serving
ENABLE_TOKEN_LOGO_SERVING: "true"
TOKEN_LOGO_BASE_URL: "https://explorer.d-bis.org/images/tokens"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: blockscout
namespace: blockscout
spec:
replicas: 1
selector:
matchLabels:
app: blockscout
template:
metadata:
labels:
app: blockscout
spec:
containers:
- name: blockscout
image: blockscout/blockscout:latest
ports:
- containerPort: 4000
envFrom:
- configMapRef:
name: blockscout-config
env:
- name: SECRET_KEY_BASE
valueFrom:
secretKeyRef:
name: blockscout-secrets
key: secret-key-base
volumeMounts:
- name: blockscout-static
mountPath: /var/www/blockscout/priv/static
volumes:
- name: blockscout-static
persistentVolumeClaim:
claimName: blockscout-static-pvc
---
apiVersion: v1
kind: Service
metadata:
name: blockscout
namespace: blockscout
spec:
selector:
app: blockscout
ports:
- port: 80
targetPort: 4000
type: LoadBalancer
EOF
log_success "Created: $DEPLOY_DIR/blockscout-deployment.yaml"
# Create deployment checklist
# NOTE(review): the checklist's first step copies `.env.example`, but this
# script does not generate a `.env.example` file — confirm it exists elsewhere
# in the repo or update the instructions.
cat > "$DEPLOY_DIR/DEPLOYMENT_CHECKLIST.md" << 'EOF'
# Blockscout Deployment Checklist
## Pre-Deployment
- [ ] Server/Cluster is provisioned
- [ ] Docker/Kubernetes is installed
- [ ] Database is ready
- [ ] RPC endpoints are accessible
- [ ] DNS is configured
- [ ] SSL certificates are ready
## Deployment Steps
### Docker Compose Deployment
1. **Prepare Environment**:
```bash
cd blockscout-deployment
cp .env.example .env
# Edit .env with your values
```
2. **Generate Secret Key**:
```bash
SECRET_KEY_BASE=$(openssl rand -hex 32)
echo "SECRET_KEY_BASE=$SECRET_KEY_BASE" >> .env
```
3. **Start Services**:
```bash
docker-compose up -d
```
4. **Verify Deployment**:
```bash
docker-compose ps
docker-compose logs blockscout
```
5. **Access Blockscout**:
- URL: http://localhost:4000
- Or via nginx reverse proxy
### Kubernetes Deployment
1. **Create Namespace**:
```bash
kubectl apply -f blockscout-deployment.yaml
```
2. **Create Secrets**:
```bash
kubectl create secret generic blockscout-secrets \
--from-literal=secret-key-base=$(openssl rand -hex 32) \
-n blockscout
```
3. **Verify Deployment**:
```bash
kubectl get pods -n blockscout
kubectl get services -n blockscout
```
4. **Check Logs**:
```bash
kubectl logs -f deployment/blockscout -n blockscout
```
## Post-Deployment
- [ ] Blockscout is accessible
- [ ] CORS headers are configured
- [ ] Token metadata API works
- [ ] Logo serving works
- [ ] Explorer shows transactions
- [ ] API endpoints are accessible
- [ ] Portfolio integration tested
## Verification
### Test Blockscout
```bash
# Test Blockscout is running
curl http://localhost:4000/api/v2/health
# Test CORS headers
curl -I -X OPTIONS http://localhost:4000/api/v2/tokens/0x... \
-H "Origin: https://portfolio.metamask.io" \
-H "Access-Control-Request-Method: GET"
# Test token metadata API
curl http://localhost:4000/api/v2/tokens/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22
```
### Expected Results
- ✅ Blockscout is accessible
- ✅ CORS headers are present
- ✅ Token metadata API returns data
- ✅ Logo URLs are accessible
- ✅ Transactions are visible
## Troubleshooting
### Blockscout Not Starting
1. Check database connection
2. Check RPC endpoint accessibility
3. Check logs: `docker-compose logs blockscout`
4. Verify environment variables
5. Check resource limits
### CORS Not Working
1. Verify CORS environment variables
2. Check nginx configuration (if using reverse proxy)
3. Test CORS headers
4. Verify allowed origins
### API Not Working
1. Check API endpoints are enabled
2. Verify database is populated
3. Check API logs
4. Test API endpoints directly
---
**Last Updated**: 2026-01-26
EOF
log_success "Created: $DEPLOY_DIR/DEPLOYMENT_CHECKLIST.md"
# Final summary of generated artifacts and follow-up actions for the operator.
log_info ""
log_info "========================================="
log_info "Blockscout Deployment Config Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $DEPLOY_DIR"
log_info " - docker-compose.yml (Docker deployment)"
log_info " - blockscout-deployment.yaml (Kubernetes deployment)"
log_info " - DEPLOYMENT_CHECKLIST.md (deployment guide)"
log_info ""
log_info "Next steps:"
log_info "1. Review deployment files"
log_info "2. Configure environment variables"
log_info "3. Deploy Blockscout"
log_info "4. Verify CORS configuration"
log_info "5. Test Portfolio integration"
log_info ""

334
scripts/deploy-rpc-endpoints.sh Executable file
View File

@@ -0,0 +1,334 @@
#!/bin/bash
# Deploy Production RPC Endpoints for ChainID 138
#
# Generates nginx / Cloudflare configuration files plus a deployment
# checklist under <project-root>/rpc-deployment.
set -e

# Resolve the directory this script lives in, then the project root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# ANSI color codes used by the loggers below.
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Leveled loggers: colored tag followed by the message, on stdout.
log_info() {
  echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
  echo -e "${GREEN}[✓]${NC} $1"
}
log_warn() {
  echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
  echo -e "${RED}[ERROR]${NC} $1"
}

# Banner.
log_info "========================================="
log_info "RPC Endpoint Deployment Guide"
log_info "========================================="
log_info ""

# Public and internal RPC endpoints for ChainID 138.
PRIMARY_RPC="https://rpc.d-bis.org"
SECONDARY_RPC="https://rpc2.d-bis.org"
INTERNAL_RPC="http://192.168.11.211:8545"

log_info "RPC Endpoint Configuration:"
log_info " Primary: $PRIMARY_RPC"
log_info " Secondary: $SECONDARY_RPC"
log_info " Internal: $INTERNAL_RPC"
log_info ""

# All generated artifacts land in this directory.
DEPLOY_DIR="$PROJECT_ROOT/rpc-deployment"
mkdir -p "$DEPLOY_DIR"

log_info "Creating nginx configuration..."
# Write the nginx reverse-proxy config for rpc.d-bis.org / rpc2.d-bis.org.
# Quoted heredoc delimiter: nginx $variables are emitted verbatim, not
# expanded by the shell.
#
# FIX: limit_req_zone is only valid in the http context. The previous
# version declared it inside both server blocks — and declared the same
# zone name twice — either of which makes `nginx -t` fail. The zone is
# now declared once at the top of the file (which is included from the
# http block via conf.d/sites-enabled); the per-server `limit_req`
# directives reference it.
cat > "$DEPLOY_DIR/nginx-rpc.conf" << 'EOF'
# Nginx configuration for RPC endpoints
# This config provides HTTPS, CORS, and rate limiting for RPC endpoints
# Rate-limit zone: must be declared in the http context (this file is
# included at http level). Referenced by limit_req in both servers below.
limit_req_zone $binary_remote_addr zone=rpc_limit:10m rate=10r/s;
upstream besu_rpc {
server 192.168.11.211:8545;
keepalive 32;
}
server {
listen 443 ssl http2;
server_name rpc.d-bis.org;
# SSL Configuration
ssl_certificate /etc/ssl/certs/d-bis.org.crt;
ssl_certificate_key /etc/ssl/private/d-bis.org.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
# CORS Headers for MetaMask
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
add_header Access-Control-Allow-Headers "Content-Type, Authorization" always;
add_header Access-Control-Max-Age 3600 always;
# Handle OPTIONS requests
if ($request_method = OPTIONS) {
return 204;
}
# Rate Limiting (zone declared once at http level above)
limit_req zone=rpc_limit burst=20 nodelay;
# RPC Endpoint
location / {
proxy_pass http://besu_rpc;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
# Health Check
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
# Secondary RPC endpoint
server {
listen 443 ssl http2;
server_name rpc2.d-bis.org;
# SSL Configuration (same as primary)
ssl_certificate /etc/ssl/certs/d-bis.org.crt;
ssl_certificate_key /etc/ssl/private/d-bis.org.key;
ssl_protocols TLSv1.2 TLSv1.3;
# CORS Headers
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
add_header Access-Control-Allow-Headers "Content-Type, Authorization" always;
# Rate Limiting (references the shared rpc_limit zone)
limit_req zone=rpc_limit burst=20 nodelay;
location / {
proxy_pass http://besu_rpc;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
# HTTP to HTTPS redirect
server {
listen 80;
server_name rpc.d-bis.org rpc2.d-bis.org;
return 301 https://$server_name$request_uri;
}
EOF
log_success "Created: $DEPLOY_DIR/nginx-rpc.conf"
# Generate the Cloudflare DNS setup guide. Quoted heredoc ('EOF'):
# content below is written to disk verbatim, with no shell expansion.
log_info "Creating Cloudflare configuration..."
cat > "$DEPLOY_DIR/cloudflare-dns-config.md" << 'EOF'
# Cloudflare DNS Configuration for RPC Endpoints
## DNS Records Required
### Primary RPC Endpoint (rpc.d-bis.org)
**A Record**:
- Name: `rpc`
- Type: `A`
- Content: `<server-ip-address>`
- TTL: `Auto` or `300`
- Proxy: `Proxied` (for DDoS protection)
**AAAA Record** (if IPv6 available):
- Name: `rpc`
- Type: `AAAA`
- Content: `<server-ipv6-address>`
- TTL: `Auto` or `300`
- Proxy: `Proxied`
### Secondary RPC Endpoint (rpc2.d-bis.org)
**A Record**:
- Name: `rpc2`
- Type: `A`
- Content: `<server-ip-address>`
- TTL: `Auto` or `300`
- Proxy: `Proxied`
**AAAA Record** (if IPv6 available):
- Name: `rpc2`
- Type: `AAAA`
- Content: `<server-ipv6-address>`
- TTL: `Auto` or `300`
- Proxy: `Proxied`
## SSL/TLS Configuration
1. **Enable SSL/TLS**:
- Go to Cloudflare Dashboard → SSL/TLS
- Set encryption mode to "Full" or "Full (strict)"
- Enable "Always Use HTTPS"
2. **SSL Certificate**:
- Cloudflare provides free SSL certificates
- Automatic certificate provisioning
- Certificate auto-renewal
3. **Minimum TLS Version**:
- Set to TLS 1.2 minimum
- Recommended: TLS 1.3
## Page Rules
Create page rules for optimal performance:
1. **Cache Level**: Standard
2. **Browser Cache TTL**: 4 hours
3. **Edge Cache TTL**: 2 hours
## Security Settings
1. **Security Level**: Medium
2. **Challenge Passage**: 30 minutes
3. **Browser Integrity Check**: On
4. **Privacy Pass Support**: On
## Rate Limiting
Configure rate limiting rules:
- Rate: 10 requests per second per IP
- Burst: 20 requests
- Action: Challenge or Block
## Monitoring
Set up Cloudflare Analytics:
- Monitor RPC endpoint traffic
- Track error rates
- Monitor response times
EOF
log_success "Created: $DEPLOY_DIR/cloudflare-dns-config.md"
# Generate the operator-facing deployment checklist (verbatim heredoc,
# same quoting rules as above).
cat > "$DEPLOY_DIR/DEPLOYMENT_CHECKLIST.md" << 'EOF'
# RPC Endpoint Deployment Checklist
## Pre-Deployment
- [ ] Server is provisioned and accessible
- [ ] Nginx is installed and configured
- [ ] SSL certificates are obtained
- [ ] DNS records are configured
- [ ] Firewall rules are configured
- [ ] Monitoring is set up
## Deployment Steps
1. **Configure Nginx**:
- [ ] Copy nginx-rpc.conf to /etc/nginx/sites-available/
- [ ] Create symlink to sites-enabled
- [ ] Test configuration: `nginx -t`
- [ ] Reload nginx: `systemctl reload nginx`
2. **Configure SSL**:
- [ ] Install SSL certificates
- [ ] Verify certificate validity
- [ ] Test HTTPS connection
- [ ] Verify certificate auto-renewal
3. **Configure DNS**:
- [ ] Add A records for rpc.d-bis.org
- [ ] Add A records for rpc2.d-bis.org
- [ ] Verify DNS propagation
- [ ] Test DNS resolution
4. **Configure Cloudflare**:
- [ ] Add domain to Cloudflare
- [ ] Update nameservers
- [ ] Configure SSL/TLS
- [ ] Enable proxy
- [ ] Configure page rules
5. **Test Endpoints**:
- [ ] Test primary RPC: `curl https://rpc.d-bis.org`
- [ ] Test secondary RPC: `curl https://rpc2.d-bis.org`
- [ ] Test CORS headers
- [ ] Test rate limiting
- [ ] Test from MetaMask
6. **Monitor**:
- [ ] Set up monitoring alerts
- [ ] Configure logging
- [ ] Test health checks
- [ ] Monitor performance
## Post-Deployment
- [ ] Update MetaMask network config with new RPC URLs
- [ ] Update token lists with new RPC URLs
- [ ] Test MetaMask connection
- [ ] Document RPC endpoints
- [ ] Announce RPC endpoints
## Verification
Test RPC endpoints:
```bash
# Test primary RPC
curl -X POST https://rpc.d-bis.org \
-H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
# Test secondary RPC
curl -X POST https://rpc2.d-bis.org \
-H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
# Test CORS
curl -I -X OPTIONS https://rpc.d-bis.org \
-H "Origin: https://metamask.io" \
-H "Access-Control-Request-Method: POST"
```
Expected CORS headers:
- `Access-Control-Allow-Origin: *`
- `Access-Control-Allow-Methods: GET, POST, OPTIONS`
- `Access-Control-Allow-Headers: Content-Type, Authorization`
EOF
log_success "Created: $DEPLOY_DIR/DEPLOYMENT_CHECKLIST.md"
# Closing summary: list generated artifacts and the operator follow-ups.
rpc_summary_lines=(
  ""
  "========================================="
  "RPC Deployment Guide Complete!"
  "========================================="
  ""
  "Files created in: $DEPLOY_DIR"
  " - nginx-rpc.conf (nginx configuration)"
  " - cloudflare-dns-config.md (DNS setup)"
  " - DEPLOYMENT_CHECKLIST.md (deployment steps)"
  ""
  "Next steps:"
  "1. Review deployment files"
  "2. Follow DEPLOYMENT_CHECKLIST.md"
  "3. Deploy RPC endpoints"
  ""
)
for rpc_summary_line in "${rpc_summary_lines[@]}"; do
  log_info "$rpc_summary_line"
done

View File

@@ -0,0 +1,181 @@
#!/bin/bash
# Complete Smart Accounts Deployment Script
# Orchestrates all deployment steps (SDK install, contract deploys,
# config update, monitoring, tests) against the sibling smom-dbis-138
# checkout.
set -e

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging functions: colored severity tag + message on stdout.
log_info() {
  echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
  echo -e "${GREEN}[SUCCESS]${NC} $1"
}

log_warning() {
  echo -e "${YELLOW}[WARNING]${NC} $1"
}

log_error() {
  echo -e "${RED}[ERROR]${NC} $1"
}

# Resolve script, project, and sibling smom-dbis-138 directories.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# FIX: fail with an actionable message instead of the bare `cd: No such
# file or directory` that set -e produced when the sibling checkout is
# missing.
if ! SMOM_DIR="$(cd "$PROJECT_ROOT/../smom-dbis-138" 2>/dev/null && pwd)"; then
  log_error "smom-dbis-138 checkout not found at $PROJECT_ROOT/../smom-dbis-138"
  exit 1
fi
log_info "Smart Accounts Complete Deployment Script"
log_info "=========================================="
log_info "Checking prerequisites..."

# Log the given message as an error and abort the run.
fail_prereq() {
  log_error "$1"
  exit 1
}

# Required toolchain.
command -v forge >/dev/null 2>&1 || fail_prereq "Foundry not found. Please install Foundry first."
command -v node >/dev/null 2>&1 || fail_prereq "Node.js not found. Please install Node.js v18+ first."

# Deployment environment file with RPC endpoint and signer key.
[[ -f "$SMOM_DIR/.env" ]] || fail_prereq ".env file not found in $SMOM_DIR"
source "$SMOM_DIR/.env"

# Variables that must be provided by .env.
[[ -n "${RPC_URL_138:-}" ]] || fail_prereq "RPC_URL_138 not set in .env"
[[ -n "${PRIVATE_KEY:-}" ]] || fail_prereq "PRIVATE_KEY not set in .env"

log_success "Prerequisites check passed"
# Phase 1: install the Smart Accounts SDK. Best-effort: a missing helper
# script only produces a warning.
log_info "Phase 1: Installing Smart Accounts SDK..."
cd "$PROJECT_ROOT"
if [ -f "scripts/install-smart-accounts-sdk.sh" ]; then
bash scripts/install-smart-accounts-sdk.sh
log_success "SDK installation complete"
else
log_warning "SDK installation script not found, skipping..."
fi
# Phase 2: deploy the core Smart Accounts Kit contracts with Foundry.
# Mandatory: a missing deployment script aborts the run.
log_info "Phase 2: Deploying Smart Accounts Kit contracts..."
cd "$SMOM_DIR"
log_info "Deploying EntryPoint and AccountFactory..."
if [ -f "script/smart-accounts/DeploySmartAccountsKit.s.sol" ]; then
forge script script/smart-accounts/DeploySmartAccountsKit.s.sol \
--rpc-url "$RPC_URL_138" \
--broadcast \
--verify \
-vvv
log_success "Smart Accounts Kit contracts deployed"
# Extract addresses from output (user will need to update config)
log_warning "Please record the deployed contract addresses and update config/smart-accounts-config.json"
else
log_error "Deployment script not found: script/smart-accounts/DeploySmartAccountsKit.s.sol"
exit 1
fi
# Phase 3: configuration update is interactive, so only print the
# command for the operator to run manually.
log_info "Phase 3: Updating configuration..."
cd "$PROJECT_ROOT"
if [ -f "scripts/update-smart-accounts-config.sh" ]; then
log_info "Run the following command to update configuration:"
log_info " ./scripts/update-smart-accounts-config.sh --interactive"
log_warning "Configuration update requires manual input of contract addresses"
else
log_warning "Configuration update script not found"
fi
# Phase 4: deploy the registry extension. Requires the Phase 2 addresses
# (SMART_ACCOUNT_FACTORY, ENTRY_POINT) to be present in the environment;
# otherwise the exact follow-up command is printed instead.
log_info "Phase 4: Deploying AccountWalletRegistryExtended..."
cd "$SMOM_DIR"
if [ -f "script/smart-accounts/DeployAccountWalletRegistryExtended.s.sol" ]; then
# Check if addresses are set
if [ -z "$SMART_ACCOUNT_FACTORY" ] || [ -z "$ENTRY_POINT" ]; then
log_warning "SMART_ACCOUNT_FACTORY or ENTRY_POINT not set in .env"
log_warning "Please set these after deploying Smart Accounts Kit contracts"
log_warning "Then run: forge script script/smart-accounts/DeployAccountWalletRegistryExtended.s.sol --rpc-url \$RPC_URL_138 --broadcast"
else
forge script script/smart-accounts/DeployAccountWalletRegistryExtended.s.sol \
--rpc-url "$RPC_URL_138" \
--broadcast \
--verify \
-vvv
log_success "AccountWalletRegistryExtended deployed"
fi
else
log_error "Deployment script not found: script/smart-accounts/DeployAccountWalletRegistryExtended.s.sol"
exit 1
fi
# Phase 5: monitoring setup (best-effort, like Phase 1).
log_info "Phase 5: Setting up monitoring..."
cd "$PROJECT_ROOT"
if [ -f "scripts/setup-monitoring.sh" ]; then
bash scripts/setup-monitoring.sh
log_success "Monitoring setup complete"
else
log_warning "Monitoring setup script not found, skipping..."
fi
# Phase 6: tests are advisory here — failures downgrade to warnings
# because contracts may not be deployed yet.
log_info "Phase 6: Running tests..."
cd "$SMOM_DIR"
log_info "Running unit tests..."
if forge test --match-path "test/smart-accounts/**" -vv; then
log_success "Unit tests passed"
else
log_warning "Some unit tests failed (this may be expected if contracts not deployed)"
fi
cd "$PROJECT_ROOT"
log_info "Running integration tests..."
if [ -f "package.json" ] && npm test 2>/dev/null; then
log_success "Integration tests passed"
else
log_warning "Integration tests skipped (may require deployed contracts)"
fi
# Final summary with the remaining manual steps.
log_info "=========================================="
log_success "Deployment script completed!"
log_info "Next steps:"
log_info "1. Update config/smart-accounts-config.json with deployed addresses"
log_info "2. Run verification script: ./scripts/verify-deployment.sh"
log_info "3. Review deployment checklist: DEPLOYMENT_CHECKLIST.md"
log_info "4. Setup monitoring and alerts"
log_info "5. Perform security audit before production use"
View File

@@ -0,0 +1,366 @@
#!/bin/bash
# Deploy MetaMask Smart Accounts Kit for ChainID 138
# This script prepares deployment configuration for Smart Accounts Kit
set -e

# Locate this script and the project root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# ANSI colors used by the loggers below.
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Leveled loggers: colored tag + message on stdout.
log_info() {
  echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
  echo -e "${GREEN}[✓]${NC} $1"
}
log_warn() {
  echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
  echo -e "${RED}[ERROR]${NC} $1"
}

# Banner.
log_info "========================================="
log_info "MetaMask Smart Accounts Kit Deployment"
log_info "========================================="
log_info ""

# All generated guides land in this directory.
DEPLOY_DIR="$PROJECT_ROOT/smart-accounts-kit-deployment"
mkdir -p "$DEPLOY_DIR"
# Generate the Smart Accounts Kit deployment guide. Quoted heredoc
# ('EOF'): the markdown below is written to disk verbatim — no shell
# expansion happens inside it.
log_info "Creating deployment guide..."
cat > "$DEPLOY_DIR/DEPLOYMENT_GUIDE.md" << 'EOF'
# MetaMask Smart Accounts Kit Deployment Guide
**Reference**: [MetaMask Smart Accounts Kit Documentation](https://docs.metamask.io/smart-accounts-kit#partner-integrations)
---
## Overview
MetaMask Smart Accounts Kit enables:
- Programmable account behavior
- Delegation framework
- Advanced Permissions (ERC-7715)
- User operation batching
- Gas abstraction
---
## Installation
### NPM Installation
```bash
npm install @metamask/smart-accounts-kit
```
### Yarn Installation
```bash
yarn add @metamask/smart-accounts-kit
```
### PNPM Installation
```bash
pnpm add @metamask/smart-accounts-kit
```
---
## Configuration
### ChainID 138 Configuration
```typescript
import { SmartAccountsKit } from '@metamask/smart-accounts-kit';
const smartAccountsKit = new SmartAccountsKit({
chainId: 138,
rpcUrl: 'https://rpc.d-bis.org',
entryPointAddress: '0x...', // EntryPoint contract address
accountFactoryAddress: '0x...', // AccountFactory contract address
});
```
---
## Deployment Steps
### Step 1: Deploy EntryPoint Contract
The EntryPoint contract handles user operations.
```bash
# Deploy EntryPoint
forge script script/DeployEntryPoint.s.sol --rpc-url $RPC_URL_138
```
### Step 2: Deploy AccountFactory Contract
The AccountFactory creates smart accounts.
```bash
# Deploy AccountFactory
forge script script/DeployAccountFactory.s.sol --rpc-url $RPC_URL_138
```
### Step 3: Deploy Paymaster Contract (Optional)
For gas abstraction, deploy a Paymaster contract.
```bash
# Deploy Paymaster
forge script script/DeployPaymaster.s.sol --rpc-url $RPC_URL_138
```
### Step 4: Configure SDK
```typescript
import { SmartAccountsKit } from '@metamask/smart-accounts-kit';
const kit = new SmartAccountsKit({
chainId: 138,
rpcUrl: 'https://rpc.d-bis.org',
entryPointAddress: '0x...',
accountFactoryAddress: '0x...',
paymasterAddress: '0x...', // Optional
});
```
---
## Integration with AccountWalletRegistry
### Extend AccountWalletRegistry
Add smart account support to existing AccountWalletRegistry:
```solidity
// Add to AccountWalletRegistry
function linkSmartAccountToWallet(
bytes32 accountRefId,
address smartAccount,
bytes32 provider
) external onlyRole(ACCOUNT_MANAGER_ROLE) {
bytes32 walletRefId = keccak256(abi.encodePacked(smartAccount));
linkAccountToWallet(accountRefId, walletRefId, provider);
}
```
---
## Features
### 1. Create Smart Account
```typescript
const smartAccount = await kit.createAccount({
owner: userAddress,
salt: '0x...', // Optional
});
```
### 2. Request Delegation
```typescript
const delegation = await kit.requestDelegation({
target: dAppAddress,
permissions: ['execute_transactions'],
expiry: Date.now() + 86400000,
});
```
### 3. Advanced Permissions (ERC-7715)
```typescript
const permission = await kit.requestAdvancedPermission({
target: dAppAddress,
functionSelector: '0x...',
allowed: true,
});
```
### 4. Batch User Operations
```typescript
const userOps = await kit.batchUserOperations([
{ to: tokenAddress, data: transferData },
{ to: anotherAddress, data: anotherData },
]);
```
---
## Testing
### Test Smart Account Creation
```typescript
const account = await kit.createAccount({ owner: userAddress });
console.log('Smart Account:', account.address);
```
### Test Delegation
```typescript
const delegation = await kit.requestDelegation({
target: dAppAddress,
permissions: ['execute_transactions'],
});
console.log('Delegation approved:', delegation.approved);
```
---
## Next Steps
1. Deploy contracts to ChainID 138
2. Configure SDK
3. Integrate with AccountWalletRegistry
4. Test all features
5. Deploy to production
---
**Last Updated**: 2026-01-26
EOF
log_success "Created: $DEPLOY_DIR/DEPLOYMENT_GUIDE.md"
# Generate the AccountWalletRegistry integration guide (verbatim heredoc,
# same quoting rules as above).
cat > "$DEPLOY_DIR/ACCOUNT_WALLET_INTEGRATION.md" << 'EOF'
# Smart Accounts Kit + AccountWalletRegistry Integration
## Overview
Integrate MetaMask Smart Accounts Kit with existing AccountWalletRegistry to enable:
- Smart accounts linked to fiat accounts
- Delegation for payment rails
- Advanced permissions for dApps
- Enhanced user experience
## Integration Architecture
```
┌─────────────────────┐
│ Fiat Account │
│ (IBAN/ABA) │
└──────────┬──────────┘
┌─────────────────────┐
│ AccountWalletRegistry│
│ (Existing) │
└──────────┬──────────┘
├──► EOA Wallet (MetaMask)
└──► Smart Account (New)
├──► Delegation Framework
├──► Advanced Permissions
└──► User Operations
```
## Implementation
### 1. Extend AccountWalletRegistry
Add smart account support:
```solidity
// Add to AccountWalletRegistry.sol
function linkSmartAccount(
bytes32 accountRefId,
address smartAccount,
bytes32 provider
) external onlyRole(ACCOUNT_MANAGER_ROLE) {
bytes32 walletRefId = keccak256(abi.encodePacked(smartAccount));
linkAccountToWallet(accountRefId, walletRefId, provider);
}
function isSmartAccount(bytes32 walletRefId) external view returns (bool) {
// Check if wallet is a smart account
// Implementation depends on smart account detection
}
```
### 2. Create Smart Account on Link
```typescript
// When linking account to wallet, create smart account if needed
async function linkAccountWithSmartAccount(
accountRefId: string,
userAddress: string
) {
// Create smart account
const smartAccount = await smartAccountsKit.createAccount({
owner: userAddress,
});
// Link to AccountWalletRegistry
await accountWalletRegistry.linkSmartAccount(
accountRefId,
smartAccount.address,
'METAMASK_SMART_ACCOUNT'
);
}
```
### 3. Use Smart Account for Payments
```typescript
// Use smart account for payment rail operations
async function initiatePayment(
accountRefId: string,
amount: bigint,
token: string
) {
// Get smart account from registry
const wallets = await accountWalletRegistry.getWallets(accountRefId);
const smartAccount = wallets.find(w => w.provider === 'METAMASK_SMART_ACCOUNT');
// Use smart account for settlement
await settlementOrchestrator.validateAndLock(triggerId, {
account: smartAccount.address,
amount,
token,
});
}
```
---
## Benefits
1. **Enhanced Capabilities**: Smart accounts enable delegation and permissions
2. **Better UX**: Gas abstraction and batch operations
3. **Compliance**: Maintain compliance with smart accounts
4. **Flexibility**: Support both EOA and smart accounts
---
**Last Updated**: 2026-01-26
EOF
log_success "Created: $DEPLOY_DIR/ACCOUNT_WALLET_INTEGRATION.md"
# Closing summary: generated guides plus operator follow-ups.
kit_summary_lines=(
  ""
  "========================================="
  "Smart Accounts Kit Config Complete!"
  "========================================="
  ""
  "Files created in: $DEPLOY_DIR"
  " - DEPLOYMENT_GUIDE.md (deployment guide)"
  " - ACCOUNT_WALLET_INTEGRATION.md (integration guide)"
  ""
  "Next steps:"
  "1. Review deployment guide"
  "2. Deploy Smart Accounts Kit contracts"
  "3. Integrate with AccountWalletRegistry"
  "4. Test smart account features"
  ""
)
for kit_summary_line in "${kit_summary_lines[@]}"; do
  log_info "$kit_summary_line"
done

309
scripts/deploy-to-explorer.sh Executable file
View File

@@ -0,0 +1,309 @@
#!/usr/bin/env bash
# Deploy all MetaMask integration changes to explorer.d-bis.org (VMID 5000)
# Phases: 1) Backend API, 2) Frontend, 3) Verify, 4) Optional enhancements
set -euo pipefail

# Repo layout: this script lives two levels below the repo root.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Target container and the Proxmox node that hosts it.
VMID=5000
VMID_IP="192.168.11.140"
PROXMOX_HOST="192.168.11.12" # r630-02
PROXMOX_USER="${PROXMOX_USER:-root}"

# ANSI colors for the loggers.
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Leveled loggers: colored tag + message on stdout.
log_info() {
  echo -e "${BLUE}[INFO]${NC} $1"
}
log_ok() {
  echo -e "${GREEN}[OK]${NC} $1"
}
log_warn() {
  echo -e "${YELLOW}[WARN]${NC} $1"
}
log_fail() {
  echo -e "${RED}[FAIL]${NC} $1"
}
# Verify that the Proxmox host is reachable non-interactively and that it
# can query the target container, before attempting any deployment step.
check_access() {
  log_info "Checking access to VMID $VMID..."
  ssh -o ConnectTimeout=5 -o BatchMode=yes \
    "$PROXMOX_USER@$PROXMOX_HOST" "pct status $VMID" &>/dev/null || {
    log_fail "Cannot access VMID $VMID via $PROXMOX_HOST. Check SSH keys and network."
    exit 1
  }
  log_ok "Access confirmed"
}
# Phase 1: build the Go config API locally, push the binary into the
# container via the Proxmox host, install a systemd unit for it, and
# expose it through an nginx /api/config proxy location.
deploy_backend_api() {
log_info "========================================="
log_info "PHASE 1: Deploy backend API (config routes)"
log_info "========================================="
# Build the Go API on the local machine (subshell keeps cwd unchanged).
log_info "Building Go API..."
(cd "$REPO_ROOT/explorer-monorepo/backend" && go build -o bin/api-server ./api/rest/cmd/)
log_ok "Go API built: explorer-monorepo/backend/bin/api-server"
# Copy the binary to the Proxmox host, then pct-push it into the container.
log_info "Copying API server to VMID $VMID..."
scp -o ConnectTimeout=10 "$REPO_ROOT/explorer-monorepo/backend/bin/api-server" \
"$PROXMOX_USER@$PROXMOX_HOST:/tmp/api-server-config"
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct push $VMID /tmp/api-server-config /usr/local/bin/explorer-config-api && \
pct exec $VMID -- chmod +x /usr/local/bin/explorer-config-api"
log_ok "API server copied to VMID $VMID:/usr/local/bin/explorer-config-api"
# Write the systemd unit inside the container. The heredoc runs in the
# remote bash; the escaped \" keep the Environment= values quoted there.
log_info "Creating systemd service for config API..."
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- bash -c 'cat > /etc/systemd/system/explorer-config-api.service <<EOF
[Unit]
Description=Explorer Config API (MetaMask networks and token list)
After=network.target postgresql.service
[Service]
Type=simple
User=root
WorkingDirectory=/opt/explorer
Environment=\"PORT=8081\"
Environment=\"CHAIN_ID=138\"
Environment=\"DATABASE_URL=postgresql://explorer:explorer@localhost:5432/explorer_db\"
ExecStart=/usr/local/bin/explorer-config-api
Restart=on-failure
RestartSec=5s
[Install]
WantedBy=multi-user.target
EOF
'"
# Enable + start the unit, then give it a moment before probing status.
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- systemctl daemon-reload && \
pct exec $VMID -- systemctl enable explorer-config-api && \
pct exec $VMID -- systemctl start explorer-config-api"
sleep 3
if ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- systemctl is-active explorer-config-api" | grep -q "active"; then
log_ok "Config API service started on port 8081"
else
log_warn "Config API service may not be running; check logs: journalctl -u explorer-config-api -n 50"
fi
# Inject an /api/config proxy location into the container's nginx vhost.
# Idempotent: skipped when the location already exists. The \$ escapes
# defer variable expansion to the remote shell / nginx config.
log_info "Updating nginx config for /api/config proxy..."
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- bash -c '
NGINX_CONF=\$(find /etc/nginx/sites-enabled -name \"*blockscout*\" -o -name \"default\" | head -1)
if [ -z \"\$NGINX_CONF\" ]; then NGINX_CONF=\"/etc/nginx/sites-enabled/default\"; fi
# Add /api/config location if not present
if ! grep -q \"location /api/config\" \"\$NGINX_CONF\"; then
sed -i \"/server_name.*explorer.d-bis.org/a\\
# MetaMask config API\\
location /api/config/ {\\
proxy_pass http://127.0.0.1:8081/api/config/;\\
proxy_set_header Host \\\$host;\\
proxy_set_header X-Real-IP \\\$remote_addr;\\
add_header Access-Control-Allow-Origin \"*\" always;\\
add_header Cache-Control \"public, max-age=3600\";\\
}\" \"\$NGINX_CONF\"
nginx -t && systemctl reload nginx
echo \"Nginx updated and reloaded\"
else
echo \"/api/config already configured\"
fi
'"
log_ok "Phase 1 complete: Backend API deployed"
echo ""
}
# Phase 2: build the Next.js frontend locally, ship it into the container,
# run it under systemd on port 3000, and proxy /wallet and /_next/ to it
# via nginx.
deploy_frontend() {
log_info "========================================="
log_info "PHASE 2: Deploy frontend (Wallet page)"
log_info "========================================="
# Build with production env vars (subshell keeps cwd unchanged).
log_info "Building frontend for production..."
(cd "$REPO_ROOT/explorer-monorepo/frontend" && \
echo "NEXT_PUBLIC_API_URL=https://explorer.d-bis.org" > .env.production && \
echo "NEXT_PUBLIC_CHAIN_ID=138" >> .env.production && \
pnpm run build)
log_ok "Frontend built"
# Package the build output plus the runtime files it needs.
log_info "Creating deployment package..."
(cd "$REPO_ROOT/explorer-monorepo/frontend" && \
tar czf /tmp/explorer-frontend.tar.gz .next public src package.json next.config.js)
# Copy to the Proxmox host, pct-push into the container, unpack, and
# install node/pnpm plus production dependencies when missing.
log_info "Copying frontend to VMID $VMID..."
scp -o ConnectTimeout=10 /tmp/explorer-frontend.tar.gz \
"$PROXMOX_USER@$PROXMOX_HOST:/tmp/explorer-frontend.tar.gz"
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct push $VMID /tmp/explorer-frontend.tar.gz /tmp/explorer-frontend.tar.gz && \
pct exec $VMID -- bash -c '
mkdir -p /opt/explorer-frontend
cd /opt/explorer-frontend
tar xzf /tmp/explorer-frontend.tar.gz
rm /tmp/explorer-frontend.tar.gz
# Install deps if needed
if ! command -v node &>/dev/null; then
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
apt-get install -y nodejs
fi
# Install pnpm if needed
if ! command -v pnpm &>/dev/null; then
npm install -g pnpm@10
fi
# Install production deps
pnpm install --prod
'"
# systemd unit that serves the built app on port 3000 via pnpm start.
log_info "Creating Next.js systemd service..."
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- bash -c 'cat > /etc/systemd/system/explorer-frontend.service <<EOF
[Unit]
Description=Explorer Frontend (Next.js)
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/explorer-frontend
Environment=\"NODE_ENV=production\"
Environment=\"PORT=3000\"
ExecStart=/usr/bin/pnpm start
Restart=on-failure
RestartSec=5s
[Install]
WantedBy=multi-user.target
EOF
'"
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- systemctl daemon-reload && \
pct exec $VMID -- systemctl enable explorer-frontend && \
pct exec $VMID -- systemctl restart explorer-frontend"
sleep 3
if ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- systemctl is-active explorer-frontend" | grep -q "active"; then
log_ok "Frontend service started on port 3000"
else
log_warn "Frontend service may not be running; check logs: journalctl -u explorer-frontend -n 50"
fi
# Add /wallet and /_next/ proxy locations after the /api/config block.
# Idempotent; \$ escapes defer expansion to the remote shell / nginx.
# NOTE(review): this sed anchors on the /api/config location added in
# Phase 1 — confirm Phase 1 ran first when invoking phases individually.
log_info "Updating nginx for frontend routes..."
ssh "$PROXMOX_USER@$PROXMOX_HOST" "pct exec $VMID -- bash -c '
NGINX_CONF=\$(find /etc/nginx/sites-enabled -name \"*blockscout*\" -o -name \"default\" | head -1)
if [ -z \"\$NGINX_CONF\" ]; then NGINX_CONF=\"/etc/nginx/sites-enabled/default\"; fi
# Add /wallet and /_next proxies if not present
if ! grep -q \"location /wallet\" \"\$NGINX_CONF\"; then
sed -i \"/location \\/api\\/config/a\\
# Frontend routes (Next.js)\\
location /wallet {\\
proxy_pass http://127.0.0.1:3000;\\
proxy_set_header Host \\\$host;\\
proxy_set_header X-Real-IP \\\$remote_addr;\\
}\\
location /_next/ {\\
proxy_pass http://127.0.0.1:3000;\\
proxy_set_header Host \\\$host;\\
}\" \"\$NGINX_CONF\"
nginx -t && systemctl reload nginx
echo \"Nginx updated for frontend\"
else
echo \"/wallet already configured\"
fi
'"
log_ok "Phase 2 complete: Frontend deployed"
echo ""
}
# Phase 3: smoke-test the deployed endpoints and then run the full
# integration suite. log_fail here only reports — it does not abort.
verify_integration() {
  log_info "========================================="
  log_info "PHASE 3: Verify integration"
  log_info "========================================="

  # True when the URL responds and its body contains the marker string.
  fetch_has() {
    curl -sf --max-time 10 "$1" | grep -q "$2"
  }

  log_info "Testing /api/config/networks..."
  if fetch_has "https://explorer.d-bis.org/api/config/networks" "chains"; then
    log_ok "GET /api/config/networks OK"
  else
    log_fail "GET /api/config/networks failed"
  fi

  log_info "Testing /api/config/token-list..."
  if fetch_has "https://explorer.d-bis.org/api/config/token-list" "tokens"; then
    log_ok "GET /api/config/token-list OK"
  else
    log_fail "GET /api/config/token-list failed"
  fi

  log_info "Testing /wallet page..."
  if fetch_has "https://explorer.d-bis.org/wallet" "MetaMask"; then
    log_ok "GET /wallet OK"
  else
    log_warn "GET /wallet may not be serving (check Next.js service)"
  fi

  # Full end-to-end checks from the integration repo (subshell keeps cwd).
  log_info "Running full integration script..."
  (cd "$REPO_ROOT/metamask-integration" && \
    EXPLORER_API_URL=https://explorer.d-bis.org ./scripts/integration-test-all.sh)
  log_ok "Phase 3 complete: Integration verified"
  echo ""
}
# Phase 4: nothing is deployed here — only print the optional follow-up
# items (token-aggregation service, Chain 138 Snap) for the operator.
deploy_optional() {
  local optional_notes=(
    "========================================="
    "PHASE 4: Optional enhancements"
    "========================================="
    "Token-aggregation service deployment (optional):"
    " - Requires DB and env configuration"
    " - See: smom-dbis-138/services/token-aggregation/docs/DEPLOYMENT.md"
    " - Skip for now (can deploy separately)"
    "Chain 138 Snap deployment (optional):"
    " - Run: cd metamask-integration/chain138-snap && pnpm run start"
    " - Install in MetaMask Flask via http://localhost:8000"
    " - Skip for now (manual testing)"
  )
  local note
  for note in "${optional_notes[@]}"; do
    log_info "$note"
  done
  log_ok "Phase 4 noted: Optional items documented"
  echo ""
}
# Main execution: runs the four phases in order. `set -euo pipefail`
# (declared at the top of this script) aborts on the first hard failure.
main() {
log_info "Deploying MetaMask integration to explorer.d-bis.org (VMID $VMID)"
echo ""
# Fail fast if the Proxmox host / container is unreachable.
check_access
echo ""
deploy_backend_api
deploy_frontend
verify_integration
deploy_optional
log_ok "========================================="
log_ok "All phases complete"
log_ok "========================================="
echo ""
# Manual follow-ups for the operator.
echo "Next steps:"
echo " 1. Visit https://explorer.d-bis.org/wallet to test Add to MetaMask"
echo " 2. Add token list URL in MetaMask: https://explorer.d-bis.org/api/config/token-list"
echo " 3. Test adding Chain 138, Ethereum Mainnet, ALL Mainnet"
echo ""
}
main "$@"

237
scripts/execute-network-tasks.sh Executable file
View File

@@ -0,0 +1,237 @@
#!/bin/bash
# Network-Dependent Tasks Execution Script
# Orchestrates execution of all network-dependent tasks
# (contract deployment, tests, verification) against ChainID 138.
# Usage: ./scripts/execute-network-tasks.sh [deploy|test|verify|all]
# Abort on the first failing command (note: not set -u / pipefail).
set -e
# ANSI color codes used by the logging helpers below.
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers: print a colorized severity tag followed by the message.
# All output goes to stdout; `echo -e` expands the ANSI escape sequences
# stored in the color variables.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error()   { echo -e "${RED}[ERROR]${NC} $1"; }
# Get script directory
# Resolve paths relative to this script so it can be run from anywhere.
# NOTE(review): SMOM_DIR assumes a sibling checkout ../smom-dbis-138; under
# set -e the `cd` inside the substitution aborts the script if it is missing.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SMOM_DIR="$(cd "$PROJECT_ROOT/../smom-dbis-138" && pwd)"
log_info "Network-Dependent Tasks Execution Script"
log_info "========================================="
# Check prerequisites
log_info "Checking prerequisites..."
# Check Foundry
# Foundry's `forge` is needed for contract deployment and unit tests.
if ! command -v forge &> /dev/null; then
log_error "Foundry not found. Please install Foundry first."
exit 1
fi
# Check Node.js
if ! command -v node &> /dev/null; then
log_error "Node.js not found. Please install Node.js v18+ first."
exit 1
fi
# Check .env file
if [ ! -f "$SMOM_DIR/.env" ]; then
log_error ".env file not found in $SMOM_DIR"
exit 1
fi
# Load environment variables
# Sourcing executes the file: any variable it sets becomes visible here
# (RPC_URL_138, PRIVATE_KEY, SMART_ACCOUNT_FACTORY, ENTRY_POINT, ...).
source "$SMOM_DIR/.env"
if [ -z "$RPC_URL_138" ]; then
log_error "RPC_URL_138 not set in .env"
exit 1
fi
if [ -z "$PRIVATE_KEY" ]; then
log_error "PRIVATE_KEY not set in .env"
exit 1
fi
log_success "Prerequisites check passed"
# Function to deploy contracts
# Phase 1: run the Smart Accounts Kit Foundry deployment script against
# ChainID 138. Currently semi-manual: the forge script contains TODO
# placeholders, so a failed broadcast is downgraded to warnings and the
# operator is told to deploy EntryPoint/AccountFactory by hand.
# Globals read: SMOM_DIR, RPC_URL_138. Side effect: changes cwd to SMOM_DIR
# (callers after this run from there).
deploy_contracts() {
log_info "Phase 1: Deploying Smart Accounts Contracts..."
cd "$SMOM_DIR"
log_info "Deploying Smart Accounts Kit contracts..."
log_warning "NOTE: EntryPoint and AccountFactory contracts need to be deployed from:"
log_warning " - MetaMask Smart Accounts Kit SDK/package"
log_warning " - Standard ERC-4337 implementations"
log_warning " - Or use existing deployed addresses"
# Check if contract sources exist
if [ ! -f "script/smart-accounts/DeploySmartAccountsKit.s.sol" ]; then
log_error "Deployment script not found: script/smart-accounts/DeploySmartAccountsKit.s.sol"
log_info "The script exists but requires actual contract implementations."
log_info "Please ensure EntryPoint and AccountFactory contracts are available."
return 1
fi
# Run deployment script (will show TODO placeholders)
# `|| { ... }` deliberately swallows a non-zero exit so set -e does not
# abort the whole run while the script is still placeholder-only.
forge script script/smart-accounts/DeploySmartAccountsKit.s.sol \
--rpc-url "$RPC_URL_138" \
--broadcast \
-vvv || {
log_warning "Deployment script executed (may show TODO placeholders)"
log_warning "Actual contract deployment requires EntryPoint and AccountFactory implementations"
}
log_warning "Please deploy EntryPoint and AccountFactory contracts manually"
log_warning "Then update config/smart-accounts-config.json with deployed addresses"
}
# Function to deploy extended registry
# Phase 2: deploy AccountWalletRegistryExtended via forge. Requires the
# Smart Accounts Kit addresses (SMART_ACCOUNT_FACTORY, ENTRY_POINT) to have
# been added to .env after Phase 1; fails fast with return 1 otherwise.
# Unlike Phase 1, a forge failure here aborts the script (set -e).
deploy_extended_registry() {
log_info "Phase 2: Deploying AccountWalletRegistryExtended..."
cd "$SMOM_DIR"
if [ -z "$SMART_ACCOUNT_FACTORY" ] || [ -z "$ENTRY_POINT" ]; then
log_error "SMART_ACCOUNT_FACTORY or ENTRY_POINT not set in .env"
log_error "Please set these after deploying Smart Accounts Kit contracts"
return 1
fi
forge script script/smart-accounts/DeployAccountWalletRegistryExtended.s.sol \
--rpc-url "$RPC_URL_138" \
--broadcast \
--verify \
-vvv
log_success "AccountWalletRegistryExtended deployed"
}
# Function to run unit tests
# Phase 3: run the Foundry unit tests under test/smart-accounts against the
# live RPC (forking/network-dependent tests). Aborts the script on failure
# via set -e.
run_unit_tests() {
log_info "Phase 3: Running Unit Tests..."
cd "$SMOM_DIR"
log_info "Running Foundry unit tests..."
forge test --match-path "test/smart-accounts/**" -vv --rpc-url "$RPC_URL_138"
log_success "Unit tests completed"
}
# Phase 4: run the npm-based integration suite from the project root.
# Skips with a warning (exit status 0) when no package.json is present;
# a failing `npm test` aborts the script via set -e.
run_integration_tests() {
    log_info "Phase 4: Running Integration Tests..."
    cd "$PROJECT_ROOT"
    # Guard clause: nothing to run without an npm manifest.
    if [ ! -f "package.json" ]; then
        log_warning "package.json not found, skipping npm tests"
        return 0
    fi
    log_info "Running npm integration tests..."
    npm test
    log_success "Integration tests completed"
}
# Phase 5: run end-to-end tests via the dedicated npm script when one exists.
# Skips with a warning when there is no package.json.
run_e2e_tests() {
    log_info "Phase 5: Running End-to-End Tests..."
    cd "$PROJECT_ROOT"
    if [ -f "package.json" ]; then
        log_info "Running E2E tests..."
        # --if-present runs test:e2e only when the script is defined and
        # exits 0 otherwise. The previous pattern
        # (`npm run test:e2e 2>/dev/null || npm test`) discarded stderr and
        # fell back to re-running the full `npm test` suite even when the
        # E2E tests genuinely FAILED, masking real E2E regressions.
        npm run --if-present test:e2e
        log_success "E2E tests completed"
    else
        log_warning "E2E test scripts not configured"
    fi
}
# Phase 6: run the repository's deployment verification script if it exists;
# otherwise warn and continue (exit status 0 either way on the skip path).
verify_deployment() {
    log_info "Phase 6: Verifying Deployment..."
    cd "$PROJECT_ROOT"
    # Guard clause: the helper script is optional.
    if [ ! -f "scripts/verify-deployment.sh" ]; then
        log_warning "Verification script not found"
        return 0
    fi
    bash scripts/verify-deployment.sh
    log_success "Deployment verification completed"
}
# Function to run health check
# Phase 7: delegate to scripts/health-check.sh when present; warn and
# continue otherwise. A failing health check aborts the run via set -e.
run_health_check() {
log_info "Phase 7: Running Health Check..."
cd "$PROJECT_ROOT"
if [ -f "scripts/health-check.sh" ]; then
bash scripts/health-check.sh
log_success "Health check completed"
else
log_warning "Health check script not found"
fi
}
# Main execution
# Dispatch on the requested phase group. An unknown argument prints usage
# and exits 1, so the completion banner below only prints on success.
main() {
local phase=$1
case $phase in
"deploy")
deploy_contracts
deploy_extended_registry
;;
"test")
run_unit_tests
run_integration_tests
run_e2e_tests
;;
"verify")
verify_deployment
run_health_check
;;
"all")
# Full pipeline: deploy, verify, then test, then a final health check.
deploy_contracts
deploy_extended_registry
verify_deployment
run_unit_tests
run_integration_tests
run_e2e_tests
run_health_check
;;
*)
log_info "Usage: $0 [deploy|test|verify|all]"
log_info ""
log_info "Phases:"
log_info " deploy - Deploy all contracts"
log_info " test - Run all tests"
log_info " verify - Verify deployment and health"
log_info " all - Execute all phases"
exit 1
;;
esac
}
# Execute
# Default to running every phase when no argument is given.
main "${1:-all}"
log_info "========================================="
log_success "Network-dependent tasks execution completed!"

163
scripts/health-check.sh Executable file
View File

@@ -0,0 +1,163 @@
#!/bin/bash
# Smart Accounts Health Check Script
# Checks the health of deployed Smart Accounts infrastructure
# (RPC reachability, contract deployment, config validity, SDK install).
# Exits 0 when healthy, 1 when any required check fails.
set -e
# ANSI color codes for the logging helpers below.
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers: colorized severity tag + message on stdout.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error()   { echo -e "${RED}[ERROR]${NC} $1"; }
# Get script directory
# Resolve paths relative to this script; SMOM_DIR assumes a sibling
# ../smom-dbis-138 checkout (the cd aborts under set -e if it is missing).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SMOM_DIR="$(cd "$PROJECT_ROOT/../smom-dbis-138" && pwd)"
log_info "Smart Accounts Health Check"
log_info "=========================="
# Check if cast is available
# `cast` (Foundry) performs all on-chain queries below.
if ! command -v cast &> /dev/null; then
log_error "cast (Foundry) not found. Please install Foundry first."
exit 1
fi
# Load environment variables
if [ ! -f "$SMOM_DIR/.env" ]; then
log_error ".env file not found in $SMOM_DIR"
exit 1
fi
source "$SMOM_DIR/.env"
if [ -z "$RPC_URL_138" ]; then
log_error "RPC_URL_138 not set in .env"
exit 1
fi
# Load configuration
CONFIG_FILE="$PROJECT_ROOT/config/smart-accounts-config.json"
if [ ! -f "$CONFIG_FILE" ]; then
log_error "Configuration file not found: $CONFIG_FILE"
exit 1
fi
# Check if jq is available
if ! command -v jq &> /dev/null; then
log_error "jq not found. Please install jq first."
exit 1
fi
# Extract addresses from config
# `// empty` makes missing keys yield "" rather than the string "null".
ENTRY_POINT=$(jq -r '.entryPointAddress // empty' "$CONFIG_FILE")
ACCOUNT_FACTORY=$(jq -r '.accountFactoryAddress // empty' "$CONFIG_FILE")
PAYMASTER=$(jq -r '.paymasterAddress // empty' "$CONFIG_FILE")
# Aggregate result: flipped to 1 by any failed required check.
HEALTH_STATUS=0
# Check RPC connectivity
# A successful `cast block-number` proves the node answers JSON-RPC; the
# `|| echo ""` keeps set -e from aborting so the failure is reported below.
log_info "1. Checking RPC connectivity..."
BLOCK_NUMBER=$(cast block-number --rpc-url "$RPC_URL_138" 2>/dev/null || echo "")
if [ -n "$BLOCK_NUMBER" ]; then
log_success " RPC is accessible (block: $BLOCK_NUMBER)"
else
log_error " RPC is not accessible"
HEALTH_STATUS=1
fi
# Check the EntryPoint, AccountFactory and Paymaster contracts.
# The three checks were identical copy-pasted stanzas differing only in the
# step number, label and whether a missing contract counts as an error;
# factored into one helper to prevent drift between the copies.
#
# check_contract STEP LABEL ADDRESS REQUIRED
#   STEP     - step number used in log output ("2.", "3.", ...)
#   LABEL    - human-readable contract name
#   ADDRESS  - contract address; "" or "null" means "not configured"
#   REQUIRED - "yes": a missing deployment is an error (HEALTH_STATUS=1);
#              anything else: warnings/info only (optional contract)
# Globals read: RPC_URL_138. Globals written: HEALTH_STATUS.
check_contract() {
    local step="$1" label="$2" addr="$3" required="$4"
    local code balance
    if [ -z "$addr" ] || [ "$addr" = "null" ]; then
        if [ "$required" = "yes" ]; then
            log_warning "$step. $label not configured"
        else
            log_info "$step. $label not configured (optional)"
        fi
        return 0
    fi
    log_info "$step. Checking $label contract..."
    # Empty or "0x" code means nothing is deployed at the address.
    code=$(cast code "$addr" --rpc-url "$RPC_URL_138" 2>/dev/null || echo "")
    if [ -n "$code" ] && [ "$code" != "0x" ]; then
        balance=$(cast balance "$addr" --rpc-url "$RPC_URL_138" 2>/dev/null || echo "0")
        log_success " $label is deployed (balance: $(cast --to-unit "$balance" ether) ETH)"
    elif [ "$required" = "yes" ]; then
        log_error " $label contract not found"
        HEALTH_STATUS=1
    else
        log_warning " $label contract not found (optional)"
    fi
}
check_contract 2 "EntryPoint" "$ENTRY_POINT" yes
check_contract 3 "AccountFactory" "$ACCOUNT_FACTORY" yes
check_contract 4 "Paymaster" "$PAYMASTER" no
# Check configuration file
# `jq empty` parses the file and fails on malformed JSON without output.
log_info "5. Checking configuration file..."
if jq empty "$CONFIG_FILE" 2>/dev/null; then
CHAIN_ID=$(jq -r '.chainId // empty' "$CONFIG_FILE")
if [ "$CHAIN_ID" = "138" ]; then
log_success " Configuration file is valid (ChainID: $CHAIN_ID)"
else
# A mismatched chain id is suspicious but not fatal to the health check.
log_warning " Configuration file ChainID mismatch (expected: 138, found: $CHAIN_ID)"
fi
else
log_error " Configuration file is invalid"
HEALTH_STATUS=1
fi
# Check SDK installation
# Presence of the package directory is a best-effort proxy for a working
# install; a missing SDK is only a warning.
log_info "6. Checking SDK installation..."
if [ -d "$PROJECT_ROOT/node_modules/@metamask/smart-accounts-kit" ]; then
log_success " Smart Accounts Kit SDK is installed"
else
log_warning " Smart Accounts Kit SDK not found (run: ./scripts/install-smart-accounts-sdk.sh)"
fi
# Summary
# Exit 0 only when every required check above passed.
log_info "=========================="
# Quote the expansion: with an unquoted $HEALTH_STATUS an empty value would
# make `[ -eq ]` raise "integer expression expected" instead of testing.
if [ "$HEALTH_STATUS" -eq 0 ]; then
log_success "Health check passed! ✅"
exit 0
else
log_error "Health check failed! ❌"
exit 1
fi

View File

@@ -0,0 +1,77 @@
#!/bin/bash
# Install MetaMask Smart Accounts Kit SDK
# This script installs the SDK and sets up the project
# (clean install via npm/yarn/pnpm, then verifies the package is present).
set -e
# Paths relative to this script so it can be invoked from anywhere.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# ANSI colors for the logging helpers.
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Installing MetaMask Smart Accounts Kit SDK"
log_info "========================================="
log_info ""
cd "$PROJECT_ROOT"
# Check if package.json exists.
# The install below cannot work without a manifest, so fail fast with a
# clear message. (The old code logged "Creating it..." but never created
# anything and fell through into a doomed `npm install`.)
if [ ! -f "package.json" ]; then
    log_error "package.json not found in $PROJECT_ROOT"
    exit 1
fi
# Check if node_modules exists; remove it to force a clean install.
if [ -d "node_modules" ]; then
    log_warn "node_modules already exists. Removing..."
    rm -rf node_modules
fi
# Install dependencies
# Prefer npm, then yarn, then pnpm — first available package manager wins.
# Under set -e a failed install aborts the script.
log_info "Installing dependencies..."
if command -v npm &> /dev/null; then
npm install
log_success "Dependencies installed successfully"
elif command -v yarn &> /dev/null; then
yarn install
log_success "Dependencies installed successfully"
elif command -v pnpm &> /dev/null; then
pnpm install
log_success "Dependencies installed successfully"
else
log_error "No package manager found (npm, yarn, or pnpm)"
exit 1
fi
# Verify installation
log_info "Verifying installation..."
if [ -d "node_modules/@metamask/smart-accounts-kit" ]; then
    log_success "Smart Accounts Kit SDK installed successfully"
    # Read the version with node's JSON parser. The previous
    # `cat | grep version | head -1 | cut -d'"' -f4` matched the FIRST line
    # containing "version" (e.g. "versions", "engineVersion") and broke on
    # formatting changes; node is guaranteed present since the install above
    # required a JS package manager. cwd is PROJECT_ROOT (cd earlier).
    log_info "Version: $(node -p "require('./node_modules/@metamask/smart-accounts-kit/package.json').version" 2>/dev/null || echo unknown)"
else
    log_error "Smart Accounts Kit SDK not found after installation"
    exit 1
fi
log_info ""
log_info "========================================="
log_success "Installation Complete!"
log_info "========================================="
log_info ""
# Manual follow-up steps for the operator.
log_info "Next steps:"
log_info "1. Review config/smart-accounts-config.json"
log_info "2. Deploy contracts using deployment scripts"
log_info "3. Update config with deployed addresses"
log_info ""

176
scripts/integration-test-all.sh Executable file
View File

@@ -0,0 +1,176 @@
#!/usr/bin/env bash
# Full integration test: provider, explorer config, optional explorer API and token-aggregation API.
# Usage: ./scripts/integration-test-all.sh
# Optional env: EXPLORER_API_URL (e.g. http://localhost:8080), TOKEN_AGGREGATION_URL (e.g. http://localhost:3000)
# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Repo root: parent of metamask-integration (proxmox workspace)
REPO_ROOT="$(cd "$PROJECT_ROOT/.." && pwd)"
# ANSI colors for the logging helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_ok() { echo -e "${GREEN}[PASS]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_fail() { echo -e "${RED}[FAIL]${NC} $1"; }
# Running tallies; reported in the summary at the bottom of the script.
PASSED=0
FAILED=0
# --- 1. Provider integration test (Node) ---
# Run the provider's own Node test in a subshell so the cd does not leak;
# the if-condition keeps a failure from tripping set -e.
log_info "========================================="
log_info "1. Provider integration test"
log_info "========================================="
if (cd "$PROJECT_ROOT/provider" && node test-integration.mjs); then
log_ok "Provider test passed"
PASSED=$((PASSED + 1))
else
log_fail "Provider test failed"
FAILED=$((FAILED + 1))
fi
echo ""
# --- 2. Validate explorer config JSONs (in-repo) ---
log_info "========================================="
log_info "2. Validate explorer config JSONs"
log_info "========================================="
# In-repo MetaMask config files checked by the validators below.
CONFIG_DIR="$REPO_ROOT/docs/04-configuration/metamask"
NETWORKS_JSON="$CONFIG_DIR/DUAL_CHAIN_NETWORKS.json"
TOKENLIST_JSON="$CONFIG_DIR/DUAL_CHAIN_TOKEN_LIST.tokenlist.json"
# Validate DUAL_CHAIN_NETWORKS.json: must exist, parse as JSON, expose a
# `chains` array, and include both chain 138 and chain 1 (by
# chainIdDecimal or chainId). Logs PASS/FAIL; returns 0 when valid.
validate_networks() {
if [[ ! -f "$NETWORKS_JSON" ]]; then
log_fail "Missing $NETWORKS_JSON"
return 1
fi
local chains
# The embedded Node program exits non-zero on any structural problem;
# `|| true` keeps set -e quiet and an empty $chains signals failure.
chains=$(node -e "
const fs = require('fs');
const path = process.argv[1];
const data = JSON.parse(fs.readFileSync(path, 'utf8'));
if (!data.chains || !Array.isArray(data.chains)) { process.exit(1); }
const ids = data.chains.map(c => c.chainIdDecimal || c.chainId).filter(Boolean);
if (!ids.includes(138) || !ids.includes(1)) { process.exit(2); }
console.log(ids.join(','));
" "$NETWORKS_JSON" 2>/dev/null) || true
if [[ -z "$chains" ]]; then
log_fail "DUAL_CHAIN_NETWORKS.json invalid or missing chain 138/1"
return 1
fi
log_ok "DUAL_CHAIN_NETWORKS.json valid (chains: $chains)"
return 0
}
# Validate DUAL_CHAIN_TOKEN_LIST.tokenlist.json: the file must exist, parse
# as JSON, and carry a top-level `tokens` array. Logs the distinct chainIds
# found on success. Returns 0 when valid, 1 otherwise.
validate_tokenlist() {
    [[ -f "$TOKENLIST_JSON" ]] || { log_fail "Missing $TOKENLIST_JSON"; return 1; }
    local ids
    # Node exits non-zero on a structural problem; an empty result below
    # therefore signals an invalid list ('|| true' keeps set -e quiet).
    ids=$(node -e "
const fs = require('fs');
const path = process.argv[1];
const data = JSON.parse(fs.readFileSync(path, 'utf8'));
if (!data.tokens || !Array.isArray(data.tokens)) { process.exit(1); }
const chainIds = [...new Set(data.tokens.map(t => t.chainId))];
console.log(chainIds.join(','));
" "$TOKENLIST_JSON" 2>/dev/null) || true
    [[ -n "$ids" ]] || { log_fail "DUAL_CHAIN_TOKEN_LIST.tokenlist.json invalid (no tokens array)"; return 1; }
    log_ok "DUAL_CHAIN_TOKEN_LIST.tokenlist.json valid (chainIds: $ids)"
    return 0
}
# Tally the two config validators into the running PASS/FAIL counters.
if validate_networks; then PASSED=$((PASSED + 1)); else FAILED=$((FAILED + 1)); fi
if validate_tokenlist; then PASSED=$((PASSED + 1)); else FAILED=$((FAILED + 1)); fi
echo ""
# --- 3. Optional: Explorer API (config endpoints) ---
# Only runs when EXPLORER_API_URL is set; default "" keeps set -u happy.
EXPLORER_API_URL="${EXPLORER_API_URL:-}"
if [[ -n "$EXPLORER_API_URL" ]]; then
log_info "========================================="
log_info "3. Explorer API ($EXPLORER_API_URL)"
log_info "========================================="
# Fetch /api/config/networks and pipe the body into a small Node program
# that requires a non-empty `chains` array containing chain 138.
if curl -sf --max-time 10 "$EXPLORER_API_URL/api/config/networks" | node -e "
const chunks = [];
process.stdin.on('data', c => chunks.push(c));
process.stdin.on('end', () => {
const data = JSON.parse(Buffer.concat(chunks).toString());
if (!data.chains || !data.chains.length) process.exit(1);
const has138 = data.chains.some(c => (c.chainIdDecimal || c.chainId) == 138);
if (!has138) process.exit(2);
console.log('ok');
});
" 2>/dev/null; then
log_ok "GET /api/config/networks OK"
PASSED=$((PASSED + 1))
else
log_fail "GET /api/config/networks failed or invalid"
FAILED=$((FAILED + 1))
fi
# Same pattern for the token list endpoint: body must carry a tokens array.
if curl -sf --max-time 10 "$EXPLORER_API_URL/api/config/token-list" | node -e "
const chunks = [];
process.stdin.on('data', c => chunks.push(c));
process.stdin.on('end', () => {
const data = JSON.parse(Buffer.concat(chunks).toString());
if (!data.tokens || !Array.isArray(data.tokens)) process.exit(1);
console.log('ok');
});
" 2>/dev/null; then
log_ok "GET /api/config/token-list OK"
PASSED=$((PASSED + 1))
else
log_fail "GET /api/config/token-list failed or invalid"
FAILED=$((FAILED + 1))
fi
echo ""
else
log_info "Skip Explorer API (set EXPLORER_API_URL to test)"
fi
# --- 4. Optional: Token-aggregation API ---
# Only runs when TOKEN_AGGREGATION_URL is set; default "" for set -u.
TOKEN_AGGREGATION_URL="${TOKEN_AGGREGATION_URL:-}"
if [[ -n "$TOKEN_AGGREGATION_URL" ]]; then
log_info "========================================="
log_info "4. Token-aggregation API ($TOKEN_AGGREGATION_URL)"
log_info "========================================="
# /api/v1/chains must return a non-empty chains array including 138
# (strict === here, unlike the explorer check which uses loose ==).
if curl -sf --max-time 10 "$TOKEN_AGGREGATION_URL/api/v1/chains" | node -e "
const chunks = [];
process.stdin.on('data', c => chunks.push(c));
process.stdin.on('end', () => {
const data = JSON.parse(Buffer.concat(chunks).toString());
if (!data.chains || !data.chains.length) process.exit(1);
const has138 = data.chains.some(c => c.chainId === 138);
if (!has138) process.exit(2);
console.log('ok');
});
" 2>/dev/null; then
log_ok "GET /api/v1/chains OK"
PASSED=$((PASSED + 1))
else
log_fail "GET /api/v1/chains failed or invalid"
FAILED=$((FAILED + 1))
fi
echo ""
else
log_info "Skip Token-aggregation API (set TOKEN_AGGREGATION_URL to test)"
fi
# --- Summary ---
# Report totals; exit 1 if anything failed so CI can gate on this script.
log_info "========================================="
log_info "Summary"
log_info "========================================="
echo "Passed: $PASSED, Failed: $FAILED"
if [[ $FAILED -gt 0 ]]; then
exit 1
fi
exit 0

109
scripts/performance-test.sh Executable file
View File

@@ -0,0 +1,109 @@
#!/bin/bash
# Performance Testing Script for Smart Accounts
# Tests smart account creation, delegation, and operations performance
# NOTE(review): the timing bodies below are placeholders pending real SDK
# calls; results currently measure loop overhead only.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# ANSI colors for the logging helpers.
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Load configuration
CONFIG_FILE="$PROJECT_ROOT/config/smart-accounts-config.json"
if [ ! -f "$CONFIG_FILE" ]; then
    log_error "Config file not found: $CONFIG_FILE"
    exit 1
fi
# jq performs all config reads below; guard its presence up front so the
# failure mode is a clear message instead of "jq: command not found"
# (matches the guard used by the sibling health-check script).
if ! command -v jq &> /dev/null; then
    log_error "jq not found. Please install jq first."
    exit 1
fi
RPC_URL=$(jq -r '.rpcUrl' "$CONFIG_FILE")
ENTRY_POINT=$(jq -r '.entryPointAddress' "$CONFIG_FILE")
ACCOUNT_FACTORY=$(jq -r '.accountFactoryAddress' "$CONFIG_FILE")
log_info "========================================="
log_info "Smart Accounts Performance Testing"
log_info "========================================="
log_info ""
log_info "RPC URL: $RPC_URL"
log_info "EntryPoint: $ENTRY_POINT"
log_info "AccountFactory: $ACCOUNT_FACTORY"
log_info ""
# Check if addresses are set ("null" is jq's output for a missing key).
if [ "$ENTRY_POINT" = "null" ] || [ "$ENTRY_POINT" = "" ]; then
    log_error "EntryPoint address not configured"
    exit 1
fi
if [ "$ACCOUNT_FACTORY" = "null" ] || [ "$ACCOUNT_FACTORY" = "" ]; then
    log_error "AccountFactory address not configured"
    exit 1
fi
# Test smart account creation performance.
# Times a fixed number of (placeholder) creation rounds and reports the
# average latency in milliseconds. The timed body is currently empty — it
# measures loop overhead until the real SDK call is wired in.
test_account_creation() {
    log_info "Testing Smart Account Creation Performance..."
    # Declare everything local so the loop does not leak globals
    # (the original left all of these, including the counter, global).
    local iterations=10
    local total_time=0
    local i start_time end_time duration avg_time
    for i in $(seq 1 "$iterations"); do
        # date +%s%N is a GNU extension — BSD/macOS date prints a literal
        # 'N'; assumed Linux-only here. TODO confirm target platforms.
        start_time=$(date +%s%N)
        # Simulate account creation (replace with actual SDK call)
        # const account = await smartAccountsKit.createAccount({ owner: userAddress });
        end_time=$(date +%s%N)
        # Nanoseconds -> milliseconds; no '$' needed inside $(( )).
        duration=$(( (end_time - start_time) / 1000000 ))
        total_time=$(( total_time + duration ))
        log_info " Iteration $i: ${duration}ms"
    done
    avg_time=$(( total_time / iterations ))
    log_success "Average creation time: ${avg_time}ms"
}
# Test delegation performance
# Placeholder: the three measurements listed below are not yet implemented;
# the function only logs a completion line.
test_delegation() {
log_info "Testing Delegation Performance..."
# Test delegation request time
# Test delegation check time
# Test delegation revocation time
log_success "Delegation performance tests complete"
}
# Test batch operations performance
# Placeholder: batch timing and gas-savings measurements are not yet
# implemented; the function only logs a completion line.
test_batch_operations() {
log_info "Testing Batch Operations Performance..."
# Test batch creation time
# Test batch execution time
# Test gas savings
log_success "Batch operations performance tests complete"
}
# Run all tests
# Execute the three suites in order; set -e aborts on the first failure.
log_info "Starting performance tests..."
log_info ""
test_account_creation
test_delegation
test_batch_operations
log_info ""
log_success "Performance testing complete!"

View File

@@ -0,0 +1,262 @@
#!/bin/bash
# Prepare Ethereum-Lists PR submission
# This script validates and prepares the chain metadata for ethereum-lists/chains PR
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Source-of-truth metadata file from the sibling smom-dbis-138 checkout.
CHAIN_METADATA="$PROJECT_ROOT/../smom-dbis-138/metamask/ethereum-lists-chain.json"
# ANSI colors for the logging helpers.
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Ethereum-Lists PR Preparation"
log_info "========================================="
log_info ""
# Check if jq is installed
if ! command -v jq &> /dev/null; then
log_error "jq is required but not installed"
exit 1
fi
# Validate chain metadata file
if [ ! -f "$CHAIN_METADATA" ]; then
log_error "Chain metadata file not found: $CHAIN_METADATA"
exit 1
fi
log_info "Validating chain metadata..."
# `jq empty` parses without output; failure means malformed JSON.
if ! jq empty "$CHAIN_METADATA" 2>/dev/null; then
log_error "Chain metadata JSON is invalid"
exit 1
fi
log_success "Chain metadata JSON is valid"
# Extract and validate fields
# Pull the headline fields for display. Note RPC_URLS holds only the FIRST
# rpc entry despite the plural name.
CHAIN_ID=$(jq -r '.chainId' "$CHAIN_METADATA")
CHAIN_NAME=$(jq -r '.name' "$CHAIN_METADATA")
SHORT_NAME=$(jq -r '.shortName' "$CHAIN_METADATA")
RPC_URLS=$(jq -r '.rpc[]' "$CHAIN_METADATA" | head -1)
EXPLORER_URL=$(jq -r '.explorers[0].url' "$CHAIN_METADATA")
log_info ""
log_info "Chain Metadata:"
log_info " Chain ID: $CHAIN_ID"
log_info " Name: $CHAIN_NAME"
log_info " Short Name: $SHORT_NAME"
log_info " RPC URL: $RPC_URLS"
log_info " Explorer: $EXPLORER_URL"
log_info ""
# Validate required fields
# `jq -e ".$field"` exits non-zero when the key is absent or null; collect
# every missing key before failing so the operator sees them all at once.
log_info "Validating required fields..."
REQUIRED_FIELDS=("chainId" "name" "shortName" "chain" "network" "nativeCurrency" "rpc" "explorers")
MISSING_FIELDS=()
for field in "${REQUIRED_FIELDS[@]}"; do
if ! jq -e ".$field" "$CHAIN_METADATA" > /dev/null 2>&1; then
MISSING_FIELDS+=("$field")
fi
done
if [ ${#MISSING_FIELDS[@]} -gt 0 ]; then
log_error "Missing required fields: ${MISSING_FIELDS[*]}"
exit 1
fi
log_success "All required fields present"
# Validate RPC URLs
# Require at least one rpc entry; warn when none of them is HTTPS.
log_info "Validating RPC URLs..."
RPC_COUNT=$(jq '.rpc | length' "$CHAIN_METADATA")
if [ "$RPC_COUNT" -lt 1 ]; then
    log_error "At least one RPC URL is required"
    exit 1
fi
# Check if RPC URLs use HTTPS.
# grep -c ALWAYS prints a count — including "0" — and exits non-zero when
# there is no match. The previous `|| echo "0"` therefore produced the
# two-line value "0\n0", which made the numeric test below fail with
# "integer expression expected" and silently skipped the warning.
# `|| true` keeps the single "0" grep already printed.
HTTPS_RPC_COUNT=$(jq -r '.rpc[]' "$CHAIN_METADATA" | grep -c "^https://" || true)
if [ "$HTTPS_RPC_COUNT" -eq 0 ]; then
    log_warn "No HTTPS RPC URLs found (recommended for production)"
fi
log_success "RPC URLs validated"
# Validate explorer
# At least one explorer entry is mandatory; a non-EIP3091 standard is only
# a warning since ethereum-lists recommends (but tools expect) EIP3091.
log_info "Validating explorer configuration..."
if ! jq -e '.explorers[0]' "$CHAIN_METADATA" > /dev/null 2>&1; then
log_error "At least one explorer is required"
exit 1
fi
EXPLORER_NAME=$(jq -r '.explorers[0].name' "$CHAIN_METADATA")
EXPLORER_STANDARD=$(jq -r '.explorers[0].standard' "$CHAIN_METADATA")
if [ "$EXPLORER_STANDARD" != "EIP3091" ]; then
log_warn "Explorer standard should be EIP3091 (found: $EXPLORER_STANDARD)"
fi
log_success "Explorer validated: $EXPLORER_NAME"
# Create PR directory
# Collect all PR artifacts under ethereum-lists-pr/ in the project root.
PR_DIR="$PROJECT_ROOT/ethereum-lists-pr"
mkdir -p "$PR_DIR"
# Copy chain metadata
# ethereum-lists names chain files by decimal chain id (eip155-138.json
# upstream; staged here simply as 138.json).
cp "$CHAIN_METADATA" "$PR_DIR/138.json"
log_success "Created: $PR_DIR/138.json"
# Create PR description
# Unquoted EOF delimiter: backticks inside are escaped (\`) so they reach
# the markdown literally; no shell variables are expanded in this body.
cat > "$PR_DIR/PR_DESCRIPTION.md" << EOF
# Add ChainID 138 - DeFi Oracle Meta Mainnet
## Network Information
- **Chain ID**: 138 (0x8a)
- **Network Name**: DeFi Oracle Meta Mainnet
- **Short Name**: defi-oracle
- **Native Currency**: ETH (18 decimals)
- **Consensus**: IBFT 2.0 (Istanbul BFT)
## RPC Endpoints
- Primary: \`https://rpc.d-bis.org\`
- Secondary: \`https://rpc2.d-bis.org\`
- WebSocket: \`wss://rpc.d-bis.org\`
## Block Explorer
- **Name**: Blockscout
- **URL**: \`https://explorer.d-bis.org\`
- **Standard**: EIP3091
## Network Status
- ✅ Network is live and operational
- ✅ RPC endpoints are publicly accessible
- ✅ Block explorer is deployed
- ✅ Token contracts are deployed
- ✅ Network is stable and tested
## Additional Information
- **Info URL**: https://github.com/Defi-Oracle-Tooling/smom-dbis-138
- **Icon**: https://explorer.d-bis.org/images/logo.png
## Testing
The network has been tested with:
- ✅ MetaMask wallet connection
- ✅ Token transfers
- ✅ Contract interactions
- ✅ Block explorer functionality
## Checklist
- [x] Chain ID is unique (138)
- [x] All required fields are present
- [x] RPC endpoints are accessible
- [x] Block explorer is accessible
- [x] Network is stable
- [x] Documentation is complete
EOF
log_success "Created: $PR_DIR/PR_DESCRIPTION.md"
# Create submission instructions
# Quoted 'EOF' delimiter: the body is written verbatim, no expansion.
cat > "$PR_DIR/SUBMISSION_INSTRUCTIONS.md" << 'EOF'
# Ethereum-Lists PR Submission Instructions
## Prerequisites
1. Fork the ethereum-lists/chains repository
2. Clone your fork locally
3. Create a new branch: `git checkout -b add-chainid-138`
## Steps
1. **Copy chain metadata**:
```bash
cp 138.json <ethereum-lists-repo>/_data/chains/eip155-138.json
```
2. **Validate the file**:
```bash
cd <ethereum-lists-repo>
npm install
npm run validate
```
3. **Commit and push**:
```bash
git add _data/chains/eip155-138.json
git commit -m "Add ChainID 138 - DeFi Oracle Meta Mainnet"
git push origin add-chainid-138
```
4. **Create PR**:
- Go to https://github.com/ethereum-lists/chains
- Click "New Pull Request"
- Select your branch
- Use PR_DESCRIPTION.md as the PR description
- Submit PR
## PR Requirements
- [x] Chain ID is unique
- [x] All required fields are present
- [x] RPC endpoints are accessible
- [x] Block explorer is accessible
- [x] Network is stable
- [x] Follows ethereum-lists format
## Review Process
1. Automated validation will run
2. Maintainers will review the PR
3. Network will be tested
4. PR will be merged if approved
## Timeline
- Initial review: 1-2 weeks
- Testing: 1-2 weeks
- Merge: After approval
## Contact
For questions, contact the ethereum-lists maintainers or open an issue.
EOF
log_success "Created: $PR_DIR/SUBMISSION_INSTRUCTIONS.md"
log_info ""
log_info "========================================="
log_info "PR Preparation Complete!"
log_info "========================================="
log_info ""
# Summarize the artifacts and the operator's manual next steps.
log_info "Files created in: $PR_DIR"
log_info " - 138.json (chain metadata)"
log_info " - PR_DESCRIPTION.md (PR description)"
log_info " - SUBMISSION_INSTRUCTIONS.md (submission guide)"
log_info ""
log_info "Next steps:"
log_info "1. Review the files in $PR_DIR"
log_info "2. Follow SUBMISSION_INSTRUCTIONS.md"
log_info "3. Submit PR to ethereum-lists/chains"
log_info ""

View File

@@ -0,0 +1,255 @@
#!/bin/bash
# Prepare token list for submission to aggregators (CoinGecko, Uniswap, etc.)
# Validates the token list JSON and writes per-aggregator submission packs.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Source token list from the sibling token-lists checkout.
TOKEN_LIST="$PROJECT_ROOT/../token-lists/lists/dbis-138.tokenlist.json"
# ANSI colors for the logging helpers.
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Token List Submission Preparation"
log_info "========================================="
log_info ""
# jq performs all JSON handling below; guard its presence up front so the
# failure mode is a clear message rather than "jq: command not found"
# (mirrors the guard in the sibling prepare-ethereum-lists-pr script).
if ! command -v jq &> /dev/null; then
    log_error "jq is required but not installed"
    exit 1
fi
# Validate token list
if [ ! -f "$TOKEN_LIST" ]; then
    log_error "Token list not found: $TOKEN_LIST"
    exit 1
fi
# `jq empty` parses without output; failure means malformed JSON.
if ! jq empty "$TOKEN_LIST" 2>/dev/null; then
    log_error "Token list JSON is invalid"
    exit 1
fi
log_success "Token list JSON is valid"
# Extract token list info
# Version follows the Token Lists schema's {major, minor, patch} object.
TOKEN_LIST_NAME=$(jq -r '.name' "$TOKEN_LIST")
TOKEN_LIST_VERSION=$(jq -r '.version | "\(.major).\(.minor).\(.patch)"' "$TOKEN_LIST")
TOKEN_COUNT=$(jq '.tokens | length' "$TOKEN_LIST")
log_info "Token List: $TOKEN_LIST_NAME v$TOKEN_LIST_VERSION"
log_info "Tokens: $TOKEN_COUNT"
log_info ""
# Create submission directory
# All per-aggregator packs are staged under token-list-submissions/.
SUBMISSION_DIR="$PROJECT_ROOT/token-list-submissions"
mkdir -p "$SUBMISSION_DIR"
# Copy token list
cp "$TOKEN_LIST" "$SUBMISSION_DIR/dbis-138.tokenlist.json"
log_success "Created: $SUBMISSION_DIR/dbis-138.tokenlist.json"
# Create CoinGecko submission package
# Unquoted EOF: $TOKEN_LIST_VERSION / $TOKEN_COUNT are expanded into the
# generated markdown; backslash-escaped backticks stay literal.
log_info "Creating CoinGecko submission package..."
cat > "$SUBMISSION_DIR/coingecko-submission.md" << EOF
# CoinGecko Token List Submission - ChainID 138
## Network Information
- **Chain ID**: 138
- **Network Name**: DeFi Oracle Meta Mainnet
- **RPC URL**: https://rpc.d-bis.org
- **Explorer**: https://explorer.d-bis.org
## Token List
- **File**: dbis-138.tokenlist.json
- **Version**: $TOKEN_LIST_VERSION
- **Tokens**: $TOKEN_COUNT
## Submission Method
1. Go to https://www.coingecko.com/en/api
2. Navigate to Token List submission
3. Upload dbis-138.tokenlist.json
4. Provide network information
5. Submit for review
## Contact
For questions about this submission, please contact the network maintainers.
## Token List URL
Once hosted, the token list will be available at:
\`https://[hosted-url]/dbis-138.tokenlist.json\`
EOF
log_success "Created: $SUBMISSION_DIR/coingecko-submission.md"
# Create Uniswap submission package
# Unquoted EOF: version/count variables are expanded into the markdown.
log_info "Creating Uniswap submission package..."
cat > "$SUBMISSION_DIR/uniswap-submission.md" << EOF
# Uniswap Token List Submission - ChainID 138
## Network Information
- **Chain ID**: 138
- **Network Name**: DeFi Oracle Meta Mainnet
- **RPC URL**: https://rpc.d-bis.org
- **Explorer**: https://explorer.d-bis.org
## Token List
- **File**: dbis-138.tokenlist.json
- **Version**: $TOKEN_LIST_VERSION
- **Tokens**: $TOKEN_COUNT
## Submission Method
1. Go to https://tokenlists.org/
2. Click "Submit a List"
3. Provide token list URL (once hosted)
4. Fill out submission form
5. Submit for review
## Requirements
- [x] Token list follows Token Lists schema
- [x] All tokens are deployed on-chain
- [x] Token metadata is accurate
- [x] Logo URLs are accessible
- [x] Network is stable
## Contact
For questions about this submission, please contact the network maintainers.
EOF
log_success "Created: $SUBMISSION_DIR/uniswap-submission.md"
# Create 1inch submission package
# Unquoted EOF: version/count variables are expanded into the markdown.
log_info "Creating 1inch submission package..."
cat > "$SUBMISSION_DIR/1inch-submission.md" << EOF
# 1inch Token List Submission - ChainID 138
## Network Information
- **Chain ID**: 138
- **Network Name**: DeFi Oracle Meta Mainnet
- **RPC URL**: https://rpc.d-bis.org
- **Explorer**: https://explorer.d-bis.org
## Token List
- **File**: dbis-138.tokenlist.json
- **Version**: $TOKEN_LIST_VERSION
- **Tokens**: $TOKEN_COUNT
## Submission Method
1. Contact 1inch team via their support channels
2. Provide token list URL (once hosted)
3. Request ChainID 138 integration
4. Provide network information
## Requirements
- [x] Token list follows Token Lists schema
- [x] All tokens are deployed on-chain
- [x] Network has sufficient liquidity
- [x] Network is stable
## Contact
- 1inch Support: https://help.1inch.io/
- 1inch Discord: https://discord.gg/1inch
EOF
log_success "Created: $SUBMISSION_DIR/1inch-submission.md"
# Create general submission guide
# Quoted 'EOF' delimiter: the body is written verbatim, no expansion.
cat > "$SUBMISSION_DIR/SUBMISSION_GUIDE.md" << 'EOF'
# Token List Submission Guide
This directory contains materials for submitting the ChainID 138 token list to various aggregators.
## Files
- `dbis-138.tokenlist.json` - The token list file
- `coingecko-submission.md` - CoinGecko submission instructions
- `uniswap-submission.md` - Uniswap/tokenlists.org submission instructions
- `1inch-submission.md` - 1inch submission instructions
## Prerequisites
Before submitting, ensure:
1. ✅ Token list is hosted on a public URL (HTTPS)
2. ✅ All tokens are deployed and verified on-chain
3. ✅ Token metadata is accurate
4. ✅ Logo URLs are accessible
5. ✅ Network is stable and operational
## Submission Order
1. **Host Token List** (Priority 1)
- Host on GitHub Pages, IPFS, or custom domain
- Ensure HTTPS and CORS headers are configured
2. **Submit to Token Lists** (Priority 2)
- Submit to tokenlists.org (Uniswap)
- This enables auto-discovery in MetaMask
3. **Submit to CoinGecko** (Priority 3)
- Enables price data and market information
4. **Submit to 1inch** (Priority 4)
- Enables DEX aggregation support
## Token List URL Format
Once hosted, the token list should be accessible at:
```
https://[your-domain]/dbis-138.tokenlist.json
```
## Verification
After submission, verify:
- [ ] Token list is accessible via URL
- [ ] Tokens appear in MetaMask Portfolio
- [ ] Token logos display correctly
- [ ] Token metadata is accurate
- [ ] Network is listed on aggregators
## Support
For questions or issues with submissions, contact the network maintainers.
EOF
log_success "Created: $SUBMISSION_DIR/SUBMISSION_GUIDE.md"
# List tokens
# Pretty-print each token as "SYMBOL (Name): address".
log_info ""
log_info "Tokens in list:"
jq -r '.tokens[] | " - \(.symbol) (\(.name)): \(.address)"' "$TOKEN_LIST"
log_info ""
log_info "========================================="
log_info "Submission Preparation Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $SUBMISSION_DIR"
log_info ""
# Manual follow-up steps for the operator.
log_info "Next steps:"
log_info "1. Host token list on public URL"
log_info "2. Review submission guides"
log_info "3. Submit to aggregators"
log_info ""

64
scripts/run-all.sh Executable file
View File

@@ -0,0 +1,64 @@
#!/usr/bin/env bash
# Run all integration tests and builds using pnpm as package manager.
# Usage: pnpm run run-all or ./scripts/run-all.sh
set -euo pipefail

# Resolve this script's directory, the project root, and the repo root
# (which holds the sibling checkouts built below).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
REPO_ROOT="$(cd "$PROJECT_ROOT/.." && pwd)"

# ANSI colors for the two log levels used below.
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_ok() { echo -e "${GREEN}[OK]${NC} $1"; }

log_info "Using pnpm as package manager"
if ! command -v pnpm >/dev/null 2>&1; then
  echo "pnpm not found. Install: npm install -g pnpm"
  exit 1
fi

# Step 1: provider test + config validation via the integration script.
log_info "1. Full integration script (provider test + config validation)"
"$SCRIPT_DIR/integration-test-all.sh"
log_ok "Integration script passed"
echo ""

# Step 2: token-aggregation service (skipped when the checkout is absent).
TOKEN_AGG="$REPO_ROOT/smom-dbis-138/services/token-aggregation"
if [[ -d "$TOKEN_AGG" && -f "$TOKEN_AGG/package.json" ]]; then
  log_info "2. Token-aggregation (pnpm install + build)"
  (cd "$TOKEN_AGG" && pnpm install && pnpm run build)
  log_ok "Token-aggregation build passed"
else
  log_info "2. Token-aggregation: skip (dir not found)"
fi
echo ""

# Step 3: explorer frontend (skipped when the checkout is absent).
EXPLORER_FRONT="$REPO_ROOT/explorer-monorepo/frontend"
if [[ -d "$EXPLORER_FRONT" && -f "$EXPLORER_FRONT/package.json" ]]; then
  log_info "3. Explorer frontend (pnpm install + build)"
  (cd "$EXPLORER_FRONT" && pnpm install && pnpm run build)
  log_ok "Explorer frontend build passed"
else
  log_info "3. Explorer frontend: skip (dir not found)"
fi
echo ""

# Step 4: Chain 138 Snap. The upstream template assumes yarn, so pnpm is
# attempted best-effort (stderr suppressed) and a yarn hint is printed on
# failure instead of aborting the run.
SNAP_ROOT="$PROJECT_ROOT/chain138-snap"
if [[ -d "$SNAP_ROOT" && -f "$SNAP_ROOT/package.json" ]]; then
  log_info "4. Chain 138 Snap (pnpm install + build)"
  if (cd "$SNAP_ROOT" && pnpm install 2>/dev/null && pnpm run build 2>/dev/null); then
    log_ok "Chain 138 Snap build passed"
  else
    log_info "4. Chain 138 Snap: use yarn in template (yarn install && yarn build)"
  fi
else
  log_info "4. Chain 138 Snap: skip (dir not found)"
fi
echo ""

log_ok "Run-all complete (pnpm)"

62
scripts/run-security-scan.sh Executable file
View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Security Scan for Smart Accounts Contracts
# Runs Slither and other security tools
set -e

# Resolve paths: the contracts live in a sibling smom-dbis-138 checkout.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
CONTRACTS_DIR="$PROJECT_ROOT/../smom-dbis-138/contracts/smart-accounts"

# ANSI colors for leveled log output.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

log_info "========================================="
log_info "Security Scan - Smart Accounts Contracts"
log_info "========================================="
log_info ""

# Bail out early when the sibling checkout is missing.
if [[ ! -d "$CONTRACTS_DIR" ]]; then
  log_error "Contracts directory not found: $CONTRACTS_DIR"
  exit 1
fi

# Install Slither on demand (Python package).
if ! command -v slither >/dev/null 2>&1; then
  log_warn "Slither not installed. Installing..."
  pip install slither-analyzer
fi

# Run Slither from the repo root so the relative contract path resolves.
# Informational/optimization/low findings are excluded. Slither exits
# non-zero when it reports findings, so `|| true` keeps the script going;
# both stdout (JSON via `--json -`) and stderr are captured to the results
# file. NOTE(review): confirm `--print human` is a valid printer name for
# the installed Slither version.
log_info "Running Slither analysis..."
cd "$PROJECT_ROOT/../smom-dbis-138"
slither contracts/smart-accounts/ \
  --exclude-informational \
  --exclude-optimization \
  --exclude-low \
  --print human \
  --json - \
  > "$PROJECT_ROOT/security-scan-results.json" 2>&1 || true

log_success "Security scan complete!"
log_info "Results saved to: security-scan-results.json"
log_info ""

# Surface high/critical findings by scanning the captured output.
if grep -q "High\|Critical" "$PROJECT_ROOT/security-scan-results.json" 2>/dev/null; then
  log_warn "High or critical issues found. Review security-scan-results.json"
else
  log_success "No high or critical issues found"
fi

View File

@@ -0,0 +1,78 @@
#!/bin/bash
# Setup Backup and Recovery Procedures for Smart Accounts
# This script sets up backup and recovery procedures
#
# Generates two helper scripts under $PROJECT_ROOT/backups:
#   backup-smart-accounts-config.sh  - snapshots the config JSON files with a timestamp
#   recover-smart-accounts-config.sh - restores a snapshot by timestamp
set -e
# Resolve this script's directory and the project root relative to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Leveled, colorized logging helpers.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Setup Backup and Recovery Procedures"
log_info "========================================="
log_info ""
BACKUP_DIR="$PROJECT_ROOT/backups"
mkdir -p "$BACKUP_DIR"
# Create backup script
# Quoted 'EOF' delimiter: the heredoc body is written verbatim (no expansion
# here); $BACKUP_DIR/$TIMESTAMP inside it are evaluated when the generated
# script runs.
# NOTE(review): the generated scripts copy `config/...` paths relative to the
# caller's working directory, so they must be run from the project root —
# confirm that is the intended usage.
cat > "$BACKUP_DIR/backup-smart-accounts-config.sh" << 'EOF'
#!/bin/bash
# Backup Smart Accounts configuration
BACKUP_DIR="$(dirname "$0")"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
# Backup configuration files
cp config/smart-accounts-config.json "$BACKUP_DIR/smart-accounts-config_${TIMESTAMP}.json"
cp config/monitoring-config.json "$BACKUP_DIR/monitoring-config_${TIMESTAMP}.json"
echo "Backup completed: ${TIMESTAMP}"
EOF
chmod +x "$BACKUP_DIR/backup-smart-accounts-config.sh"
# Create recovery script
# Takes a timestamp argument; without one it lists available snapshots.
cat > "$BACKUP_DIR/recover-smart-accounts-config.sh" << 'EOF'
#!/bin/bash
# Recover Smart Accounts configuration from backup
BACKUP_DIR="$(dirname "$0")"
if [ -z "$1" ]; then
echo "Usage: $0 <backup_timestamp>"
echo "Available backups:"
ls -1 "$BACKUP_DIR"/*.json | xargs -n1 basename
exit 1
fi
TIMESTAMP=$1
# Restore configuration files
cp "$BACKUP_DIR/smart-accounts-config_${TIMESTAMP}.json" config/smart-accounts-config.json
cp "$BACKUP_DIR/monitoring-config_${TIMESTAMP}.json" config/monitoring-config.json
echo "Configuration restored from: ${TIMESTAMP}"
EOF
chmod +x "$BACKUP_DIR/recover-smart-accounts-config.sh"
# Summarize what was generated and where.
log_success "Backup and recovery procedures set up!"
log_info ""
log_info "Backup directory: $BACKUP_DIR"
log_info "Backup script: $BACKUP_DIR/backup-smart-accounts-config.sh"
log_info "Recovery script: $BACKUP_DIR/recover-smart-accounts-config.sh"
log_info ""

345
scripts/setup-blockscout-cors.sh Executable file
View File

@@ -0,0 +1,345 @@
#!/bin/bash
# Setup Blockscout CORS Configuration for MetaMask Portfolio
# This script creates CORS configuration files for Blockscout
#
# Output (all under $PROJECT_ROOT/blockscout-cors-config/):
#   cors.env                       - env vars for a Blockscout deployment
#   blockscout-cors-configmap.yaml - the same values as a Kubernetes ConfigMap
#   docker-compose.cors.env        - the same values for docker-compose env_file
#   nginx-cors.conf                - CORS headers for an nginx reverse proxy
#   blockscout-config.exs          - Elixir/Phoenix application config snippet
#   SETUP_INSTRUCTIONS.md          - how to apply and verify the above
# Nothing is deployed here; the files are templates to apply manually.
set -e
# Resolve this script's directory and the project root relative to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Leveled, colorized logging helpers.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Blockscout CORS Configuration Setup"
log_info "========================================="
log_info ""
# Create CORS configuration directory
CORS_DIR="$PROJECT_ROOT/blockscout-cors-config"
mkdir -p "$CORS_DIR"
# Create environment variables file
# Quoted 'EOF' delimiters throughout: heredoc bodies are written verbatim
# (no shell expansion).
# NOTE(review): confirm the variable names below (ENABLE_CORS, CORS_*, ...)
# against the env vars actually supported by the deployed Blockscout version.
log_info "Creating CORS environment configuration..."
cat > "$CORS_DIR/cors.env" << 'EOF'
# Blockscout CORS Configuration for MetaMask Portfolio
# Add these environment variables to your Blockscout deployment
# Enable CORS
ENABLE_CORS=true
# Allowed Origins (comma-separated)
CORS_ALLOWED_ORIGINS=https://portfolio.metamask.io,https://metamask.io,https://chainlist.org,https://explorer.d-bis.org
# CORS Allowed Methods
CORS_ALLOWED_METHODS=GET,POST,OPTIONS
# CORS Allowed Headers
CORS_ALLOWED_HEADERS=Content-Type,Authorization,Accept
# CORS Max Age (seconds)
CORS_MAX_AGE=3600
# Token Metadata API Configuration
ENABLE_TOKEN_METADATA_API=true
TOKEN_METADATA_CACHE_ENABLED=true
TOKEN_METADATA_CACHE_TTL=3600
# Logo Serving Configuration
ENABLE_TOKEN_LOGO_SERVING=true
TOKEN_LOGO_BASE_URL=https://explorer.d-bis.org/images/tokens
# API Rate Limiting
API_RATE_LIMIT_ENABLED=true
API_RATE_LIMIT_PER_MINUTE=120
EOF
log_success "Created: $CORS_DIR/cors.env"
# Create Kubernetes ConfigMap
# Same values as cors.env, as a ConfigMap in the besu-network namespace.
log_info "Creating Kubernetes ConfigMap..."
cat > "$CORS_DIR/blockscout-cors-configmap.yaml" << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
name: blockscout-metamask-cors
namespace: besu-network
labels:
app: blockscout
component: cors-config
data:
# CORS Configuration
ENABLE_CORS: "true"
CORS_ALLOWED_ORIGINS: "https://portfolio.metamask.io,https://metamask.io,https://chainlist.org,https://explorer.d-bis.org"
CORS_ALLOWED_METHODS: "GET,POST,OPTIONS"
CORS_ALLOWED_HEADERS: "Content-Type,Authorization,Accept"
CORS_MAX_AGE: "3600"
# Token Metadata API
ENABLE_TOKEN_METADATA_API: "true"
TOKEN_METADATA_CACHE_ENABLED: "true"
TOKEN_METADATA_CACHE_TTL: "3600"
# Logo Serving
ENABLE_TOKEN_LOGO_SERVING: "true"
TOKEN_LOGO_BASE_URL: "https://explorer.d-bis.org/images/tokens"
# API Rate Limiting
API_RATE_LIMIT_ENABLED: "true"
API_RATE_LIMIT_PER_MINUTE: "120"
EOF
log_success "Created: $CORS_DIR/blockscout-cors-configmap.yaml"
# Create Docker Compose environment
# Same values again, in env_file format for docker-compose.
log_info "Creating Docker Compose environment..."
cat > "$CORS_DIR/docker-compose.cors.env" << 'EOF'
# Blockscout CORS Configuration for Docker Compose
# Add to your docker-compose.yml environment section
ENABLE_CORS=true
CORS_ALLOWED_ORIGINS=https://portfolio.metamask.io,https://metamask.io,https://chainlist.org,https://explorer.d-bis.org
CORS_ALLOWED_METHODS=GET,POST,OPTIONS
CORS_ALLOWED_HEADERS=Content-Type,Authorization,Accept
CORS_MAX_AGE=3600
ENABLE_TOKEN_METADATA_API=true
TOKEN_METADATA_CACHE_ENABLED=true
TOKEN_METADATA_CACHE_TTL=3600
ENABLE_TOKEN_LOGO_SERVING=true
TOKEN_LOGO_BASE_URL=https://explorer.d-bis.org/images/tokens
API_RATE_LIMIT_ENABLED=true
API_RATE_LIMIT_PER_MINUTE=120
EOF
log_success "Created: $CORS_DIR/docker-compose.cors.env"
# Create nginx CORS configuration (if using nginx in front of Blockscout)
# The quoted 'EOF' keeps nginx variables like $request_method literal.
log_info "Creating nginx CORS configuration..."
cat > "$CORS_DIR/nginx-cors.conf" << 'EOF'
# Nginx CORS Configuration for Blockscout
# Add to your nginx server block for explorer.d-bis.org
# CORS Headers for MetaMask Portfolio
add_header Access-Control-Allow-Origin "https://portfolio.metamask.io" always;
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
add_header Access-Control-Allow-Headers "Content-Type, Authorization, Accept" always;
add_header Access-Control-Max-Age 3600 always;
add_header Access-Control-Allow-Credentials true always;
# Handle OPTIONS preflight requests
if ($request_method = OPTIONS) {
add_header Access-Control-Allow-Origin "https://portfolio.metamask.io" always;
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
add_header Access-Control-Allow-Headers "Content-Type, Authorization, Accept" always;
add_header Access-Control-Max-Age 3600 always;
add_header Content-Length 0;
add_header Content-Type text/plain;
return 204;
}
# Additional CORS for other MetaMask domains
if ($http_origin ~* "^https://(metamask\.io|chainlist\.org)$") {
add_header Access-Control-Allow-Origin "$http_origin" always;
}
EOF
log_success "Created: $CORS_DIR/nginx-cors.conf"
# Create application configuration
# Elixir/Phoenix snippet mirroring the same CORS/metadata/logo/rate settings.
log_info "Creating application configuration..."
cat > "$CORS_DIR/blockscout-config.exs" << 'EOF'
# Blockscout CORS Configuration (Elixir/Phoenix)
# Add to config/prod.exs or config/runtime.exs
config :blockscout_web, BlockscoutWeb.Endpoint,
http: [
port: 4000,
protocol_options: [
idle_timeout: 60_000
]
],
# CORS Configuration
cors: [
enabled: true,
allowed_origins: [
"https://portfolio.metamask.io",
"https://metamask.io",
"https://chainlist.org",
"https://explorer.d-bis.org"
],
allowed_methods: ["GET", "POST", "OPTIONS"],
allowed_headers: ["Content-Type", "Authorization", "Accept"],
max_age: 3600
],
# Token Metadata API
token_metadata: [
enabled: true,
cache_enabled: true,
cache_ttl: 3600
],
# Logo Serving
logo_serving: [
enabled: true,
base_url: "https://explorer.d-bis.org/images/tokens"
],
# API Rate Limiting
rate_limiting: [
enabled: true,
requests_per_minute: 120
]
EOF
log_success "Created: $CORS_DIR/blockscout-config.exs"
# Create setup instructions
# Operator-facing README covering all three application methods plus
# verification and troubleshooting.
cat > "$CORS_DIR/SETUP_INSTRUCTIONS.md" << 'EOF'
# Blockscout CORS Configuration Setup
## Overview
This directory contains CORS configuration files for Blockscout to enable MetaMask Portfolio compatibility.
## Files
- `cors.env` - Environment variables for Blockscout
- `blockscout-cors-configmap.yaml` - Kubernetes ConfigMap
- `docker-compose.cors.env` - Docker Compose environment
- `nginx-cors.conf` - Nginx CORS configuration
- `blockscout-config.exs` - Elixir/Phoenix configuration
## Setup Methods
### Method 1: Environment Variables (Docker/Kubernetes)
1. **Docker Compose**:
```bash
# Add to docker-compose.yml
env_file:
- docker-compose.cors.env
```
2. **Kubernetes**:
```bash
kubectl apply -f blockscout-cors-configmap.yaml
# Then reference in deployment:
envFrom:
- configMapRef:
name: blockscout-metamask-cors
```
3. **Direct Environment**:
```bash
source cors.env
# Or export variables manually
```
### Method 2: Application Configuration (Elixir)
1. Copy `blockscout-config.exs` to your Blockscout config
2. Merge with existing configuration
3. Restart Blockscout
### Method 3: Nginx (Reverse Proxy)
1. Add `nginx-cors.conf` to your nginx server block
2. Reload nginx: `systemctl reload nginx`
## Verification
Test CORS headers:
```bash
# Test CORS preflight
curl -I -X OPTIONS https://explorer.d-bis.org/api/v2/tokens/0x... \
-H "Origin: https://portfolio.metamask.io" \
-H "Access-Control-Request-Method: GET"
# Expected headers:
# Access-Control-Allow-Origin: https://portfolio.metamask.io
# Access-Control-Allow-Methods: GET, POST, OPTIONS
# Access-Control-Allow-Headers: Content-Type, Authorization, Accept
```
## API Endpoints Required
Blockscout must provide these API endpoints for Portfolio:
1. **Token Metadata**:
```
GET /api/v2/tokens/{address}
```
2. **Token Holders**:
```
GET /api/v2/tokens/{address}/holders
```
3. **Account Token Balances**:
```
GET /api/v2/addresses/{address}/token-balances
```
4. **Account Transactions**:
```
GET /api/v2/addresses/{address}/transactions
```
## Testing
After configuration:
1. Restart Blockscout
2. Test CORS headers (see verification above)
3. Test from MetaMask Portfolio
4. Verify token metadata is accessible
5. Verify token logos are accessible
## Troubleshooting
### CORS Headers Not Appearing
- Check if CORS is enabled in Blockscout
- Verify environment variables are set
- Check nginx/application logs
- Verify origin is in allowed list
### Portfolio Cannot Access API
- Verify API endpoints are accessible
- Check rate limiting settings
- Verify SSL certificates are valid
- Test API endpoints directly
## Support
For issues, check:
- Blockscout documentation
- MetaMask Portfolio requirements
- CORS configuration best practices
EOF
log_success "Created: $CORS_DIR/SETUP_INSTRUCTIONS.md"
# Final summary for the operator.
log_info ""
log_info "========================================="
log_info "CORS Configuration Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $CORS_DIR"
log_info ""
log_info "Next steps:"
log_info "1. Review SETUP_INSTRUCTIONS.md"
log_info "2. Apply CORS configuration to Blockscout"
log_info "3. Test CORS headers"
log_info "4. Verify Portfolio compatibility"
log_info ""

45
scripts/setup-monitoring.sh Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Setup monitoring for Smart Accounts
# This script sets up monitoring configuration for Smart Accounts contracts
set -e

# Locate this script and the project root it belongs to.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# ANSI color palette for leveled log output.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error()   { echo -e "${RED}[ERROR]${NC} $1"; }

log_info "========================================="
log_info "Setup Smart Accounts Monitoring"
log_info "========================================="
log_info ""

# The monitoring config must already exist; this script only verifies it
# and prints the manual follow-up steps.
CONFIG_FILE="$PROJECT_ROOT/config/monitoring-config.json"
if [[ ! -f "$CONFIG_FILE" ]]; then
  log_error "Monitoring config not found: $CONFIG_FILE"
  exit 1
fi

log_info "Monitoring configuration file: $CONFIG_FILE"
log_info ""
log_info "To enable monitoring:"
log_info "1. Update contract addresses in config/monitoring-config.json"
log_info "2. Configure Prometheus endpoint"
log_info "3. Configure Grafana dashboards"
log_info "4. Set up alerting rules"
log_info ""
log_success "Monitoring setup complete!"
log_info ""

View File

@@ -0,0 +1,275 @@
#!/bin/bash
# Setup Public Token List Hosting for MetaMask
# This script prepares token list for hosting on various platforms
#
# Validates the canonical token list JSON (via jq), copies it into a local
# staging directory, and generates hosting guides/configs for three options:
# GitHub Pages, nginx on a custom domain, and IPFS.
set -e
# FIX: the closing double quote after `pwd)` was missing here, which left the
# string open, swallowed the following lines, and broke the whole script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
TOKEN_LIST="$PROJECT_ROOT/../token-lists/lists/dbis-138.tokenlist.json"
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Leveled, colorized logging helpers.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
# FIX: the checkmark glyph had been lost (printed "[]"); restored "[✓]" to
# match the log_success helper used by every sibling script.
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Token List Hosting Setup"
log_info "========================================="
log_info ""
# Validate token list
# Fail fast when the list is missing or is not parseable JSON.
if [ ! -f "$TOKEN_LIST" ]; then
log_error "Token list not found: $TOKEN_LIST"
exit 1
fi
if ! jq empty "$TOKEN_LIST" 2>/dev/null; then
log_error "Token list JSON is invalid"
exit 1
fi
log_success "Token list JSON is valid"
# Create hosting directory
HOSTING_DIR="$PROJECT_ROOT/token-list-hosting"
mkdir -p "$HOSTING_DIR"
# Copy token list
cp "$TOKEN_LIST" "$HOSTING_DIR/token-list.json"
log_success "Copied token list to hosting directory"
# Create GitHub Pages setup
# Quoted 'EOF' delimiters below: heredoc bodies are written verbatim
# (no shell expansion).
log_info "Creating GitHub Pages setup..."
cat > "$HOSTING_DIR/github-pages-setup.md" << 'EOF'
# GitHub Pages Token List Hosting
## Setup Steps
1. **Create GitHub Repository**:
```bash
git init
git add token-list.json
git commit -m "Add ChainID 138 token list"
git remote add origin https://github.com/your-org/token-list.git
git push -u origin main
```
2. **Enable GitHub Pages**:
- Go to repository Settings
- Navigate to Pages
- Source: Deploy from a branch
- Branch: main
- Folder: / (root)
- Click Save
3. **Access Token List**:
- URL: `https://your-org.github.io/token-list/token-list.json`
- Or custom domain: `https://your-domain.com/token-list.json`
4. **Add to MetaMask**:
- Settings → Security & Privacy → Token Lists
- Add custom token list
- Enter: `https://your-org.github.io/token-list/token-list.json`
## CORS Configuration
GitHub Pages automatically serves with CORS headers, so no additional configuration needed.
## Auto-Update
When you update token-list.json and push to main, GitHub Pages automatically updates.
EOF
log_success "Created: $HOSTING_DIR/github-pages-setup.md"
# Create nginx hosting configuration
# Quoted 'EOF' keeps nginx variables like $request_method literal.
log_info "Creating nginx hosting configuration..."
cat > "$HOSTING_DIR/nginx-token-list.conf" << 'EOF'
# Nginx configuration for token list hosting
# Add to your nginx server block
server {
listen 443 ssl http2;
server_name your-domain.com;
# SSL Configuration
ssl_certificate /etc/ssl/certs/your-domain.crt;
ssl_certificate_key /etc/ssl/private/your-domain.key;
# Token List Location
location /token-list.json {
alias /var/www/token-list/token-list.json;
# CORS Headers
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, OPTIONS" always;
add_header Access-Control-Allow-Headers "Content-Type" always;
add_header Access-Control-Max-Age 3600 always;
add_header Content-Type application/json always;
# Cache for 1 hour
expires 1h;
add_header Cache-Control "public, must-revalidate";
# Handle OPTIONS
if ($request_method = OPTIONS) {
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, OPTIONS" always;
add_header Access-Control-Max-Age 3600 always;
add_header Content-Length 0;
return 204;
}
}
}
EOF
log_success "Created: $HOSTING_DIR/nginx-token-list.conf"
# Create IPFS hosting guide
cat > "$HOSTING_DIR/ipfs-hosting-guide.md" << 'EOF'
# IPFS Token List Hosting
## Setup Steps
1. **Install IPFS**:
```bash
# Download from https://ipfs.io
# Or use package manager
```
2. **Start IPFS Node**:
```bash
ipfs daemon
```
3. **Add Token List**:
```bash
ipfs add token-list.json
# Note the hash returned
```
4. **Pin Token List**:
```bash
ipfs pin add <hash>
```
5. **Access Token List**:
- IPFS Gateway: `https://ipfs.io/ipfs/<hash>`
- Pinata Gateway: `https://gateway.pinata.cloud/ipfs/<hash>`
- Cloudflare Gateway: `https://cloudflare-ipfs.com/ipfs/<hash>`
6. **Add to MetaMask**:
- Use one of the gateway URLs above
- Add to MetaMask token lists
## Pinning Services
For permanent hosting, use a pinning service:
- Pinata: https://pinata.cloud
- Infura: https://infura.io
- NFT.Storage: https://nft.storage
## Advantages
- Decentralized
- Permanent (if pinned)
- No single point of failure
- CORS-friendly gateways
EOF
log_success "Created: $HOSTING_DIR/ipfs-hosting-guide.md"
# Create hosting comparison
cat > "$HOSTING_DIR/HOSTING_COMPARISON.md" << 'EOF'
# Token List Hosting Options Comparison
## GitHub Pages
**Pros**:
- Free
- Easy setup
- Automatic HTTPS
- Version control
- Auto-updates
**Cons**:
- Requires GitHub account
- Public repository
- Limited customization
**Best For**: Quick setup, version control
---
## IPFS
**Pros**:
- Decentralized
- Permanent (if pinned)
- No single point of failure
- Multiple gateways
**Cons**:
- Requires IPFS node or pinning service
- Hash changes on update
- Gateway dependency
**Best For**: Decentralized hosting, permanent storage
---
## Custom Domain/CDN
**Pros**:
- Full control
- Custom domain
- CDN performance
- Professional appearance
**Cons**:
- Requires server/CDN
- SSL certificate needed
- Maintenance required
- Cost
**Best For**: Production, professional setup
---
## Recommendation
1. **Start**: GitHub Pages (quick, free)
2. **Production**: Custom domain with CDN
3. **Backup**: IPFS (permanent, decentralized)
EOF
log_success "Created: $HOSTING_DIR/HOSTING_COMPARISON.md"
# Final summary for the operator.
log_info ""
log_info "========================================="
log_info "Token List Hosting Setup Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $HOSTING_DIR"
log_info " - token-list.json (token list file)"
log_info " - github-pages-setup.md (GitHub Pages guide)"
log_info " - nginx-token-list.conf (nginx config)"
log_info " - ipfs-hosting-guide.md (IPFS guide)"
log_info " - HOSTING_COMPARISON.md (hosting options)"
log_info ""
log_info "Next steps:"
log_info "1. Choose hosting method"
log_info "2. Follow setup guide"
log_info "3. Host token list"
log_info "4. Add URL to MetaMask"
log_info "5. Verify token list works"
log_info ""

256
scripts/setup-token-logos.sh Executable file
View File

@@ -0,0 +1,256 @@
#!/bin/bash
# Setup Token Logo Hosting for MetaMask
# This script creates logo hosting configuration and updates token lists
#
# Output (under $PROJECT_ROOT/token-logos/):
#   size subdirectories (32x32 ... 512x512) and a blockscout/ staging tree
#   LOGO_HOSTING_GUIDE.md   - requirements and hosting options for logos
#   download-logos.sh       - fetches stock logos from Trust Wallet assets
#   nginx-logo-serving.conf - nginx location blocks for serving the logos
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# NOTE(review): TOKEN_LIST is defined but not used anywhere below in this
# script — confirm whether the "updates token lists" step is still pending.
TOKEN_LIST="$PROJECT_ROOT/../token-lists/lists/dbis-138.tokenlist.json"
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Leveled, colorized logging helpers.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Token Logo Hosting Setup"
log_info "========================================="
log_info ""
# Create logo directory structure
LOGO_DIR="$PROJECT_ROOT/token-logos"
mkdir -p "$LOGO_DIR"/{32x32,128x128,256x256,512x512}
mkdir -p "$LOGO_DIR"/blockscout/images/tokens
log_info "Created logo directory structure"
# Create logo hosting guide
# Quoted 'EOF' delimiters throughout: heredoc bodies (including the embedded
# example script in the guide) are written verbatim, with no shell expansion.
cat > "$LOGO_DIR/LOGO_HOSTING_GUIDE.md" << 'EOF'
# Token Logo Hosting Guide
## Overview
Token logos should be hosted at:
```
https://explorer.d-bis.org/images/tokens/{token-address}.png
```
## Logo Requirements
### Sizes
- **32x32**: Small icons (MetaMask token list)
- **128x128**: Medium icons (MetaMask wallet)
- **256x256**: Large icons (dApps)
- **512x512**: High resolution (Blockscout)
### Format
- **Format**: PNG (recommended) or SVG
- **Background**: Transparent (preferred)
- **Aspect Ratio**: 1:1 (square)
- **File Size**: < 100KB per logo
## Token Logos Needed
### cUSDT (Compliant Tether USD)
- **Address**: `0x93E66202A11B1772E55407B32B44e5Cd8eda7f22`
- **Logo Source**: Can use official USDT logo
- **URL**: https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo.png
### cUSDC (Compliant USD Coin)
- **Address**: `0xf22258f57794CC8E06237084b353Ab30fFfa640b`
- **Logo Source**: Can use official USDC logo
- **URL**: https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48/logo.png
### WETH (Wrapped Ether)
- **Address**: `0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2`
- **Logo Source**: Can use WETH logo
- **URL**: https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2/logo.png
### WETH10 (Wrapped Ether v10)
- **Address**: `0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f`
- **Logo Source**: Can use WETH logo
- **URL**: https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2/logo.png
### LINK (Chainlink Token)
- **Address**: `0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03`
- **Logo Source**: Chainlink logo
- **URL**: https://raw.githubusercontent.com/chainlink/chainlink-docs/main/docs/images/chainlink-logo.svg
### ETH/USD Oracle
- **Address**: `0x3304b747E565a97ec8AC220b0B6A1f6ffDB837e6`
- **Logo Source**: Custom oracle logo (needs creation)
## Hosting Options
### Option 1: Blockscout (Recommended)
1. **Upload logos to Blockscout**:
```bash
# Upload to Blockscout static files
/var/www/blockscout/priv/static/images/tokens/
```
2. **Logo naming convention**:
```
{token-address}.png
{token-address}-32.png (for 32x32)
{token-address}-128.png (for 128x128)
```
3. **Access URL**:
```
https://explorer.d-bis.org/images/tokens/{token-address}.png
```
### Option 2: CDN/Static Hosting
1. **Upload to CDN** (Cloudflare, AWS S3, etc.)
2. **Update token list with CDN URLs**
3. **Ensure CORS is enabled**
### Option 3: IPFS
1. **Upload logos to IPFS**
2. **Pin logos**
3. **Update token list with IPFS URLs**
## Logo Download Script
Use this script to download logos from Trust Wallet assets:
```bash
#!/bin/bash
# Download token logos from Trust Wallet assets
TOKENS=(
"0xdAC17F958D2ee523a2206206994597C13D831ec7:cusdt"
"0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48:cusdc"
"0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2:weth"
)
for token in "${TOKENS[@]}"; do
IFS=':' read -r address name <<< "$token"
url="https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/$address/logo.png"
wget -O "$name.png" "$url" || echo "Failed to download $name"
done
```
## Blockscout Configuration
Add to Blockscout configuration:
```elixir
config :blockscout_web, BlockscoutWeb.Endpoint,
logo_serving: [
enabled: true,
base_path: "/images/tokens",
fallback_logo: "/images/default-token.png"
]
```
## Verification
Test logo URLs:
```bash
# Test cUSDT logo
curl -I https://explorer.d-bis.org/images/tokens/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22.png
# Test cUSDC logo
curl -I https://explorer.d-bis.org/images/tokens/0xf22258f57794CC8E06237084b353Ab30fFfa640b.png
```
Expected: HTTP 200 with Content-Type: image/png
EOF
log_success "Created: $LOGO_DIR/LOGO_HOSTING_GUIDE.md"
# Create logo download script
# The generated script fetches stock logos from Trust Wallet assets into its
# own directory (bash 4+ required for the associative array it uses).
cat > "$LOGO_DIR/download-logos.sh" << 'EOF'
#!/bin/bash
# Download token logos from Trust Wallet assets
set -e
LOGO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Token addresses and names
declare -A TOKENS=(
["0xdAC17F958D2ee523a2206206994597C13D831ec7"]="cusdt"
["0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"]="cusdc"
["0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"]="weth"
)
BASE_URL="https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets"
for address in "${!TOKENS[@]}"; do
name="${TOKENS[$address]}"
url="$BASE_URL/$address/logo.png"
echo "Downloading $name logo from $url..."
wget -q -O "$LOGO_DIR/$name.png" "$url" && echo "✓ Downloaded $name.png" || echo "✗ Failed to download $name"
done
echo ""
echo "Logos downloaded to: $LOGO_DIR"
EOF
chmod +x "$LOGO_DIR/download-logos.sh"
log_success "Created: $LOGO_DIR/download-logos.sh"
# Create nginx configuration for logo serving
# Quoted 'EOF' keeps nginx variables like $uri literal.
cat > "$LOGO_DIR/nginx-logo-serving.conf" << 'EOF'
# Nginx configuration for token logo serving
# Add to your nginx server block for explorer.d-bis.org
location /images/tokens/ {
alias /var/www/blockscout/priv/static/images/tokens/;
# CORS headers
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, OPTIONS" always;
add_header Access-Control-Max-Age 3600 always;
# Cache logos for 1 year
expires 1y;
add_header Cache-Control "public, immutable";
# Fallback to default logo if not found
try_files $uri /images/default-token.png =404;
}
# Default token logo
location = /images/default-token.png {
alias /var/www/blockscout/priv/static/images/default-token.png;
expires 1y;
add_header Cache-Control "public, immutable";
}
EOF
log_success "Created: $LOGO_DIR/nginx-logo-serving.conf"
# Final summary for the operator.
log_info ""
log_info "========================================="
log_info "Logo Hosting Setup Complete!"
log_info "========================================="
log_info ""
log_info "Files created in: $LOGO_DIR"
log_info " - LOGO_HOSTING_GUIDE.md (hosting guide)"
log_info " - download-logos.sh (logo download script)"
log_info " - nginx-logo-serving.conf (nginx config)"
log_info ""
log_info "Next steps:"
log_info "1. Run download-logos.sh to download logos"
log_info "2. Upload logos to Blockscout or CDN"
log_info "3. Update token list with logo URLs"
log_info "4. Test logo URLs"
log_info ""

192
scripts/test-network-access.sh Executable file
View File

@@ -0,0 +1,192 @@
#!/bin/bash
# Network Access Test Script
# Tests connectivity to ChainID 138 RPC endpoints
set -e

# ANSI color codes used by the logging helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # reset / no color

# Logging helpers: each prints a colored severity tag followed by the message.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error()   { echo -e "${RED}[ERROR]${NC} $1"; }
# Resolve this script's directory and the sibling project directories.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Fail with a clear message if the sibling repo is missing — a bare `cd`
# failure inside the substitution would abort under `set -e` with no hint.
SMOM_DIR="$(cd "$PROJECT_ROOT/../smom-dbis-138" 2>/dev/null && pwd)" || {
    log_error "smom-dbis-138 directory not found next to $PROJECT_ROOT"
    exit 1
}
log_info "Network Access Test for ChainID 138"
log_info "===================================="
# cast (Foundry) is required for the chain-level checks below.
if ! command -v cast &> /dev/null; then
    log_error "cast (Foundry) not found. Please install Foundry first."
    exit 1
fi
# curl is optional; the raw JSON-RPC probe is skipped without it.
if ! command -v curl &> /dev/null; then
    log_warning "curl not found. Some tests may be skipped."
fi
# Load environment variables (RPC_URL_138, PRIVATE_KEY, ...).
if [ ! -f "$SMOM_DIR/.env" ]; then
    log_error ".env file not found in $SMOM_DIR"
    exit 1
fi
source "$SMOM_DIR/.env"
# RPC endpoints to test; the configured RPC_URL_138 is listed first.
RPC_ENDPOINTS=(
    "${RPC_URL_138:-http://192.168.11.211:8545}"
    "http://192.168.11.211:8545"
    "http://192.168.11.250:8545"
    "https://rpc.d-bis.org"
    "https://rpc-http-pub.d-bis.org"
    "https://rpc-http-prv.d-bis.org"
)
# De-duplicate without word-splitting or globbing (SC2207): read the sorted
# unique list back into the array, one element per line.
# NOTE(review): `sort -u` also reorders the endpoints, so the "first working
# endpoint" recommendation later is alphabetical, not priority order — same
# as the original behavior; confirm whether ordering should be preserved.
mapfile -t RPC_ENDPOINTS < <(printf '%s\n' "${RPC_ENDPOINTS[@]}" | sort -u)
#######################################
# Run connectivity, block-number, chain-id and balance checks against one
# RPC endpoint.
# Globals:
#   PRIVATE_KEY (read, optional) - enables the deployer balance check
# Arguments:
#   $1 - RPC URL to probe
#   $2 - short display name used in log output
# Returns:
#   0 if the endpoint is reachable and serves a live chain, 1 otherwise
#######################################
test_rpc_endpoint() {
    local rpc_url=$1
    local endpoint_name=$2
    local response block_number chain_id deployer balance balance_eth
    log_info "Testing: $endpoint_name ($rpc_url)"
    # Test 1: raw JSON-RPC connectivity (skipped when curl is unavailable).
    if command -v curl &> /dev/null; then
        # Declaration and assignment are separated so `local` cannot mask the
        # curl exit status; `|| true` keeps set -e from aborting so the
        # response (including the error text captured via 2>&1) is inspected.
        response=$(curl -s -X POST "$rpc_url" \
            -H "Content-Type: application/json" \
            -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
            --max-time 5 2>&1) || true
        if echo "$response" | grep -q "result"; then
            log_success "  ✓ Basic connectivity: OK"
        else
            log_error "  ✗ Basic connectivity: FAILED"
            log_warning "  Response: $response"
            return 1
        fi
    fi
    # Test 2: the chain must be reachable and producing blocks.
    block_number=$(cast block-number --rpc-url "$rpc_url" 2>/dev/null || echo "")
    if [ -n "$block_number" ] && [ "$block_number" != "0" ]; then
        log_success "  ✓ Block number: $block_number"
    else
        log_error "  ✗ Block number: FAILED or network not producing blocks"
        return 1
    fi
    # Test 3: the endpoint must serve the expected chain (138).
    chain_id=$(cast chain-id --rpc-url "$rpc_url" 2>/dev/null || echo "")
    if [ "$chain_id" = "138" ]; then
        log_success "  ✓ Chain ID: $chain_id (correct)"
    elif [ -n "$chain_id" ]; then
        log_warning "  ⚠ Chain ID: $chain_id (expected 138)"
    else
        log_error "  ✗ Chain ID: FAILED"
        return 1
    fi
    # Test 4: deployer balance — informational only, never fails the probe.
    if [ -n "${PRIVATE_KEY:-}" ]; then
        deployer=$(cast wallet address "$PRIVATE_KEY" 2>/dev/null || echo "")
        if [ -n "$deployer" ]; then
            balance=$(cast balance "$deployer" --rpc-url "$rpc_url" 2>/dev/null || echo "0")
            balance_eth=$(cast --to-unit "$balance" ether 2>/dev/null || echo "0")
            if [ "$balance" != "0" ]; then
                log_success "  ✓ Deployer balance: $balance_eth ETH"
            else
                log_warning "  ⚠ Deployer balance: 0 ETH (may need funding)"
            fi
        fi
    fi
    return 0
}
# Probe every endpoint, partitioning them into working and failed lists.
log_info "Testing RPC endpoints..."
echo ""
WORKING_ENDPOINTS=()
FAILED_ENDPOINTS=()
for endpoint in "${RPC_ENDPOINTS[@]}"; do
    # Derive a short display name: strip the scheme, then any :port suffix.
    # Parameter expansion replaces the previous `sed 's|https\?://||'`,
    # which is not portable (BSD sed has no \? in basic regular expressions).
    endpoint_name=${endpoint#*://}
    endpoint_name=${endpoint_name%%:*}
    if test_rpc_endpoint "$endpoint" "$endpoint_name"; then
        WORKING_ENDPOINTS+=("$endpoint")
        log_success "$endpoint_name is accessible and working"
    else
        FAILED_ENDPOINTS+=("$endpoint")
        log_error "$endpoint_name is not accessible or not working"
    fi
    echo ""
done
# Summary
log_info "===================================="
log_info "Test Summary"
log_info "===================================="
if [ ${#WORKING_ENDPOINTS[@]} -gt 0 ]; then
    log_success "Working Endpoints (${#WORKING_ENDPOINTS[@]}):"
    for endpoint in "${WORKING_ENDPOINTS[@]}"; do
        log_success "  $endpoint"
    done
    echo ""
    # Prefer the already-configured RPC_URL_138 when it is among the working
    # endpoints; otherwise suggest the first working one.
    # [*] joins the array into one space-separated word for the substring
    # match — "${arr[@]}" inside [[ ]] is the implicit-concatenation pitfall
    # flagged by ShellCheck SC2199.
    if [ -n "${RPC_URL_138:-}" ] && [[ " ${WORKING_ENDPOINTS[*]} " == *" ${RPC_URL_138} "* ]]; then
        log_success "Recommended: $RPC_URL_138 (already configured)"
    else
        log_info "Recommended: ${WORKING_ENDPOINTS[0]}"
        log_warning "Update .env: RPC_URL_138=${WORKING_ENDPOINTS[0]}"
    fi
else
    log_error "No working RPC endpoints found!"
    log_error ""
    log_error "Possible issues:"
    log_error "  1. Network connectivity problems"
    log_error "  2. RPC endpoints not operational"
    log_error "  3. Firewall blocking access"
    log_error "  4. VPN or network routing needed"
    exit 1
fi
if [ ${#FAILED_ENDPOINTS[@]} -gt 0 ]; then
    echo ""
    log_warning "Failed Endpoints (${#FAILED_ENDPOINTS[@]}):"
    for endpoint in "${FAILED_ENDPOINTS[@]}"; do
        log_warning "  $endpoint"
    done
fi
echo ""
log_success "Network access test complete!"

View File

@@ -0,0 +1,191 @@
#!/bin/bash
# Test MetaMask Portfolio Integration
# This script tests Blockscout API endpoints required for MetaMask Portfolio
set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# ANSI color codes for the logging helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Colored logging helpers.
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
    echo -e "${GREEN}[✓]${NC} $1"
}
log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}
# Explorer base URL and the account used for read-only API probes;
# both can be overridden via the environment.
EXPLORER_URL="${EXPLORER_URL:-https://explorer.d-bis.org}"
TEST_ADDRESS="${TEST_ADDRESS:-0x4207aA9aC89B8bF4795dbAbBbE17fdd224E7947C}"
# Stablecoin contract addresses on ChainID 138 whose metadata is probed below.
CUSDT_ADDRESS="0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
CUSDC_ADDRESS="0xf22258f57794CC8E06237084b353Ab30fFfa640b"
log_info "========================================="
log_info "MetaMask Portfolio Integration Test"
log_info "========================================="
log_info ""
log_info "Explorer URL: $EXPLORER_URL"
log_info "Test Address: $TEST_ADDRESS"
log_info ""
# Test CORS headers: send the same OPTIONS preflight MetaMask Portfolio's
# origin would, then look for Access-Control-* headers in the response.
# NOTE(review): this curl has no `|| true` guard, so under `set -e` a
# network failure aborts the whole script here — confirm that is intended.
log_info "Testing CORS headers..."
CORS_TEST=$(curl -s -I -X OPTIONS "$EXPLORER_URL/api/v2/tokens/$CUSDT_ADDRESS" \
    -H "Origin: https://portfolio.metamask.io" \
    -H "Access-Control-Request-Method: GET" 2>&1)
if echo "$CORS_TEST" | grep -q "Access-Control-Allow-Origin"; then
    log_success "CORS headers present"
    echo "$CORS_TEST" | grep -i "access-control" | head -5
else
    log_warn "CORS headers not found or incomplete"
    echo "$CORS_TEST" | head -10
fi
log_info ""
# Test token metadata API
log_info "Testing token metadata API endpoints..."
# Test cUSDT metadata
log_info "1. Testing cUSDT metadata..."
TOKEN_METADATA=$(curl -s "$EXPLORER_URL/api/v2/tokens/$CUSDT_ADDRESS" 2>&1 || echo "ERROR")
if echo "$TOKEN_METADATA" | grep -q "symbol\|name\|decimals"; then
    log_success "cUSDT metadata API working"
    echo "$TOKEN_METADATA" | jq -r '.symbol, .name, .decimals' 2>/dev/null || echo "$TOKEN_METADATA" | head -5
else
    log_error "cUSDT metadata API failed"
    echo "$TOKEN_METADATA" | head -5
fi
log_info ""
# Test cUSDC metadata.
# NOTE(review): TOKEN_METADATA is reused here, overwriting the cUSDT
# response fetched above — anything reading TOKEN_METADATA after this point
# only sees the cUSDC result.
log_info "2. Testing cUSDC metadata..."
TOKEN_METADATA=$(curl -s "$EXPLORER_URL/api/v2/tokens/$CUSDC_ADDRESS" 2>&1 || echo "ERROR")
if echo "$TOKEN_METADATA" | grep -q "symbol\|name\|decimals"; then
    log_success "cUSDC metadata API working"
    echo "$TOKEN_METADATA" | jq -r '.symbol, .name, .decimals' 2>/dev/null || echo "$TOKEN_METADATA" | head -5
else
    log_error "cUSDC metadata API failed"
    echo "$TOKEN_METADATA" | head -5
fi
log_info ""
# Test account token balances (Portfolio uses this for balance display).
log_info "3. Testing account token balances..."
BALANCES=$(curl -s "$EXPLORER_URL/api/v2/addresses/$TEST_ADDRESS/token-balances" 2>&1 || echo "ERROR")
if echo "$BALANCES" | grep -q "token\|balance"; then
    log_success "Token balances API working"
    echo "$BALANCES" | jq -r '.[] | "\(.token.symbol): \(.value)"' 2>/dev/null | head -5 || echo "$BALANCES" | head -5
else
    log_warn "Token balances API may not be available"
    echo "$BALANCES" | head -5
fi
log_info ""
# Test account transactions (Portfolio uses this for activity history).
log_info "4. Testing account transactions API..."
TXS=$(curl -s "$EXPLORER_URL/api/v2/addresses/$TEST_ADDRESS/transactions" 2>&1 || echo "ERROR")
if echo "$TXS" | grep -q "hash\|block_number"; then
    log_success "Transactions API working"
    TX_COUNT=$(echo "$TXS" | jq '.items | length' 2>/dev/null || echo "0")
    log_info "Found $TX_COUNT transactions"
else
    log_warn "Transactions API may not be available"
    echo "$TXS" | head -5
fi
log_info ""
# Test logo URLs: HEAD-request each token logo and check for HTTP 200.
log_info "5. Testing token logo URLs..."
LOGO_URLS=(
    "$EXPLORER_URL/images/tokens/$CUSDT_ADDRESS.png"
    "$EXPLORER_URL/images/tokens/$CUSDC_ADDRESS.png"
)
for logo_url in "${LOGO_URLS[@]}"; do
    LOGO_TEST=$(curl -s -I "$logo_url" 2>&1 | head -1)
    if echo "$LOGO_TEST" | grep -q "200\|OK"; then
        # NOTE(review): $logo_url is unquoted inside $(basename ...) (SC2086);
        # harmless for these URLs but worth quoting.
        log_success "Logo accessible: $(basename $logo_url)"
    else
        log_warn "Logo not found: $(basename $logo_url)"
    fi
done
log_info ""
# Create a markdown report summarizing the checks above.
REPORT_FILE="$PROJECT_ROOT/portfolio-integration-test-report.md"
# Bug fix: the metadata probes above reuse one TOKEN_METADATA variable, so
# by this point it only holds the cUSDC response and the cUSDT report line
# would mirror cUSDC's status. Fetch each token's metadata into its own
# variable so both report lines are accurate.
CUSDT_METADATA=$(curl -s "$EXPLORER_URL/api/v2/tokens/$CUSDT_ADDRESS" 2>&1 || echo "ERROR")
CUSDC_METADATA=$(curl -s "$EXPLORER_URL/api/v2/tokens/$CUSDC_ADDRESS" 2>&1 || echo "ERROR")
# Unquoted EOF: the $(...) substitutions below run while the report is written.
cat > "$REPORT_FILE" << EOF
# MetaMask Portfolio Integration Test Report
**Date**: $(date)
**Explorer URL**: $EXPLORER_URL
**Test Address**: $TEST_ADDRESS
## Test Results
### CORS Configuration
- Status: $(echo "$CORS_TEST" | grep -q "Access-Control-Allow-Origin" && echo "✅ PASS" || echo "❌ FAIL")
- Headers: $(echo "$CORS_TEST" | grep -i "access-control" | head -3 | tr '\n' ' ')
### API Endpoints
1. **Token Metadata API**
   - cUSDT: $(echo "$CUSDT_METADATA" | grep -q "symbol" && echo "✅ Working" || echo "❌ Failed")
   - cUSDC: $(echo "$CUSDC_METADATA" | grep -q "symbol" && echo "✅ Working" || echo "❌ Failed")
2. **Token Balances API**
   - Status: $(echo "$BALANCES" | grep -q "token\|balance" && echo "✅ Working" || echo "⚠️ Limited")
3. **Transactions API**
   - Status: $(echo "$TXS" | grep -q "hash" && echo "✅ Working" || echo "⚠️ Limited")
4. **Logo URLs**
   - cUSDT Logo: $(curl -s -I "$EXPLORER_URL/images/tokens/$CUSDT_ADDRESS.png" 2>&1 | grep -q "200" && echo "✅ Accessible" || echo "❌ Not Found")
   - cUSDC Logo: $(curl -s -I "$EXPLORER_URL/images/tokens/$CUSDC_ADDRESS.png" 2>&1 | grep -q "200" && echo "✅ Accessible" || echo "❌ Not Found")
## Recommendations
1. Ensure all API endpoints are accessible
2. Verify CORS headers are correctly configured
3. Test from MetaMask Portfolio after deployment
4. Monitor API response times
5. Verify token logos are accessible
## Next Steps
1. Deploy Blockscout with CORS configuration
2. Test from MetaMask Portfolio
3. Verify token auto-detection
4. Verify balance display
5. Verify transaction history
EOF
log_success "Created test report: $REPORT_FILE"
log_info ""
log_info "========================================="
log_info "Portfolio Integration Test Complete!"
log_info "========================================="
log_info ""
log_info "Test report saved to: $REPORT_FILE"
log_info ""
log_info "Next steps:"
log_info "1. Review test report"
log_info "2. Fix any failing tests"
log_info "3. Test from MetaMask Portfolio"
log_info ""

View File

@@ -0,0 +1,156 @@
#!/bin/bash
# Update Smart Accounts configuration with deployed addresses
# This script helps update config/smart-accounts-config.json after deployment
set -e
# Resolve the config file path relative to this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
CONFIG_FILE="$PROJECT_ROOT/config/smart-accounts-config.json"
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Colored logging helpers.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Update Smart Accounts Configuration"
log_info "========================================="
log_info ""
# jq is required for all JSON edits below; fail fast if it is missing.
if ! command -v jq &> /dev/null; then
    log_error "jq is required but not installed. Install with: apt-get install jq"
    exit 1
fi
# The config file must already exist; this script only updates fields.
if [ ! -f "$CONFIG_FILE" ]; then
    log_error "Config file not found: $CONFIG_FILE"
    exit 1
fi
#######################################
# Write one top-level address field in CONFIG_FILE.
# Globals:
#   CONFIG_FILE (read/written)
# Arguments:
#   $1 - JSON key (e.g. entryPointAddress)
#   $2 - 0x-prefixed 20-byte hex address; empty => field is skipped
# Outputs:
#   Logs the outcome; invalid addresses are reported and skipped (the
#   function still returns 0 so remaining fields keep processing under set -e).
#######################################
update_address() {
    local key=$1
    local address=$2
    local tmp_file
    if [ -z "$address" ]; then
        log_warn "Skipping $key (no address provided)"
        return
    fi
    # Validate address format before touching the file.
    if [[ ! "$address" =~ ^0x[0-9a-fA-F]{40}$ ]]; then
        log_error "Invalid address format: $address"
        return
    fi
    # Pass the value via --arg instead of interpolating it into the jq
    # program, so quoting in the value can never break the filter.
    tmp_file=$(mktemp)
    jq --arg v "$address" ".$key = \$v" "$CONFIG_FILE" > "$tmp_file"
    mv "$tmp_file" "$CONFIG_FILE"
    log_success "Updated $key: $address"
}
#######################################
# Record deployment metadata (address, tx hash, block number) for one
# contract under .deployment in CONFIG_FILE.
# Globals:
#   CONFIG_FILE (read/written)
# Arguments:
#   $1 - contract key under .deployment
#   $2 - deployed address (empty => skip entirely)
#   $3 - transaction hash (optional)
#   $4 - block number (optional, decimal digits)
#######################################
update_deployment() {
    local contract=$1
    local address=$2
    local tx_hash=$3
    local block_number=$4
    local tmp_file filter
    if [ -z "$address" ]; then
        log_warn "Skipping deployment info for $contract (no address provided)"
        return
    fi
    # Build a single jq program instead of three separate passes over the
    # file. Values go in via --arg/--argjson so they cannot break or inject
    # into the filter (the previous version spliced $tx_hash/$block_number
    # straight into the jq program text).
    filter=".deployment.$contract.address = \$addr"
    local -a jq_args=(--arg addr "$address")
    if [ -n "$tx_hash" ]; then
        filter+=" | .deployment.$contract.transactionHash = \$tx"
        jq_args+=(--arg tx "$tx_hash")
    fi
    if [ -n "$block_number" ]; then
        if [[ "$block_number" =~ ^[0-9]+$ ]]; then
            # --argjson keeps blockNumber a JSON number, as before.
            filter+=" | .deployment.$contract.blockNumber = \$blk"
            jq_args+=(--argjson blk "$block_number")
        else
            log_warn "Ignoring non-numeric block number: $block_number"
        fi
    fi
    tmp_file=$(mktemp)
    jq "${jq_args[@]}" "$filter" "$CONFIG_FILE" > "$tmp_file"
    mv "$tmp_file" "$CONFIG_FILE"
    log_success "Updated deployment info for $contract"
}
# Interactive mode: prompt for each address and apply them all.
if [ "${1:-}" = "--interactive" ] || [ "${1:-}" = "-i" ]; then
    log_info "Interactive mode: Enter addresses when prompted"
    log_info ""
    # -r keeps backslashes in pasted input literal.
    read -r -p "EntryPoint address (or press Enter to skip): " entry_point
    read -r -p "AccountFactory address (or press Enter to skip): " account_factory
    read -r -p "Paymaster address (or press Enter to skip): " paymaster
    update_address "entryPointAddress" "$entry_point"
    update_address "accountFactoryAddress" "$account_factory"
    update_address "paymasterAddress" "$paymaster"
    log_info ""
    log_success "Configuration updated!"
    exit 0
fi
# No arguments: show usage and exit.
if [ $# -eq 0 ]; then
    log_info "Usage:"
    log_info "  $0 --interactive              # Interactive mode"
    log_info "  $0 --entry-point ADDRESS      # Update EntryPoint address"
    log_info "  $0 --account-factory ADDRESS  # Update AccountFactory address"
    log_info "  $0 --paymaster ADDRESS        # Update Paymaster address"
    log_info "  $0 --all ENTRY_POINT ACCOUNT_FACTORY [PAYMASTER]  # Update all"
    exit 0
fi
# Guard for options that require a value: without it, `shift 2` past the end
# of the argument list fails and `set -e` kills the script with a cryptic
# "shift count out of range" error.
require_value() {
    # $1 - option name (for the error message), $2 - remaining argc
    if [ "$2" -lt 2 ]; then
        log_error "$1 requires an address argument"
        exit 1
    fi
}
# Command-line mode: parse options, each consuming its value.
while [[ $# -gt 0 ]]; do
    case $1 in
        --entry-point)
            require_value "$1" $#
            update_address "entryPointAddress" "$2"
            shift 2
            ;;
        --account-factory)
            require_value "$1" $#
            update_address "accountFactoryAddress" "$2"
            shift 2
            ;;
        --paymaster)
            require_value "$1" $#
            update_address "paymasterAddress" "$2"
            shift 2
            ;;
        --all)
            # Needs at least two positional values; the paymaster is optional.
            if [ $# -lt 3 ]; then
                log_error "--all requires ENTRY_POINT and ACCOUNT_FACTORY addresses"
                exit 1
            fi
            update_address "entryPointAddress" "$2"
            update_address "accountFactoryAddress" "$3"
            if [ -n "${4:-}" ]; then
                update_address "paymasterAddress" "$4"
            fi
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            exit 1
            ;;
    esac
done
log_info ""
log_success "Configuration updated!"

103
scripts/update-token-logos.sh Executable file
View File

@@ -0,0 +1,103 @@
#!/bin/bash
# Update all token lists with proper logo URLs
# This script updates logoURI fields in all token list files
set -e
# Resolve paths relative to this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Colored logging helpers.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "========================================="
log_info "Update Token Logos in Token Lists"
log_info "========================================="
log_info ""
# Token logo mapping: contract address -> explorer-hosted logo URL.
declare -A TOKEN_LOGOS=(
    # Format: address=logo_url
    ["0x3304b747E565a97ec8AC220b0B6A1f6ffDB837e6"]="https://explorer.d-bis.org/images/tokens/0x3304b747E565a97ec8AC220b0B6A1f6ffDB837e6.png"
    ["0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"]="https://explorer.d-bis.org/images/tokens/0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2.png"
    ["0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"]="https://explorer.d-bis.org/images/tokens/0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f.png"
    ["0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"]="https://explorer.d-bis.org/images/tokens/0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03.png"
    ["0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"]="https://explorer.d-bis.org/images/tokens/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22.png"
    ["0xf22258f57794CC8E06237084b353Ab30fFfa640b"]="https://explorer.d-bis.org/images/tokens/0xf22258f57794CC8E06237084b353Ab30fFfa640b.png"
)
# Token list files to update; files that do not exist are skipped with a warning.
TOKEN_LISTS=(
    "$PROJECT_ROOT/../token-lists/lists/dbis-138.tokenlist.json"
    "$PROJECT_ROOT/docs/METAMASK_TOKEN_LIST.json"
    "$PROJECT_ROOT/config/token-list.json"
    "$PROJECT_ROOT/config/complete-token-list.json"
)
# jq performs all JSON edits; fail fast if it is missing.
if ! command -v jq &> /dev/null; then
    log_error "jq is required but not installed"
    exit 1
fi
#######################################
# Set the logoURI of one token (matched by address, case-insensitively)
# inside a token-list JSON file.
# Arguments:
#   $1 - token list file path
#   $2 - token contract address
#   $3 - logo URL to write
# Outputs:
#   Logs a warning when the file is missing or jq fails; the original file
#   is left untouched on failure.
#######################################
update_token_logo() {
    local file=$1
    local address=$2
    local logo_url=$3
    if [ ! -f "$file" ]; then
        log_warn "File not found: $file"
        return
    fi
    local tmp="${file}.tmp"
    # Restructured from `jq > tmp && mv ... 2>/dev/null || {...}`: that chain
    # silently discarded mv errors and could misattribute an mv failure to jq.
    # Only replace the original file when jq itself succeeds.
    if jq --arg addr "$address" --arg logo "$logo_url" \
        '(.tokens[]? | select((.address | ascii_downcase) == ($addr | ascii_downcase)) | .logoURI) = $logo' \
        "$file" > "$tmp"; then
        mv -- "$tmp" "$file"
    else
        log_warn "Failed to update logo for $address in $file"
        rm -f -- "$tmp"
    fi
}
# Apply every known logo URL to each token list file that exists.
for list_file in "${TOKEN_LISTS[@]}"; do
    if [ ! -f "$list_file" ]; then
        log_warn "Token list not found: $list_file"
        continue
    fi
    log_info "Updating: $list_file"
    for token_addr in "${!TOKEN_LOGOS[@]}"; do
        update_token_logo "$list_file" "$token_addr" "${TOKEN_LOGOS[$token_addr]}"
    done
    log_success "Updated: $list_file"
done
# Final summary listing the files that were actually present.
log_info ""
log_info "========================================="
log_info "Token Logo Update Complete!"
log_info "========================================="
log_info ""
log_info "Updated token lists with logo URLs:"
for list_file in "${TOKEN_LISTS[@]}"; do
    if [ -f "$list_file" ]; then
        log_info "  - $list_file"
    fi
done
log_info ""
log_info "Logo URLs point to: https://explorer.d-bis.org/images/tokens/{address}.png"
log_info ""

159
scripts/validate-config.sh Executable file
View File

@@ -0,0 +1,159 @@
#!/bin/bash
# Configuration Validation Script
# Validates Smart Accounts configuration files
set -e
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging functions
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}
# Get script directory (config paths below are resolved relative to it).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
log_info "Configuration Validation"
log_info "========================"
# jq performs both JSON-validity checks and field extraction; required.
if ! command -v jq &> /dev/null; then
    log_error "jq not found. Please install jq first."
    exit 1
fi
# 0 = all checks passed so far; set to 1 by any hard failure below.
VALIDATION_STATUS=0

#######################################
# Validate one 0x-address field from the config. Replaces three identical
# copies of the same regex check (EntryPoint / AccountFactory / Paymaster).
# Globals:
#   VALIDATION_STATUS (written on malformed address; the function runs in
#   the current shell so the assignment is visible to the caller)
# Arguments:
#   $1 - human-readable label, e.g. "EntryPoint"
#   $2 - value read from the config ("" when absent)
#   $3 - "required" or "optional" (controls the not-configured message)
#######################################
check_address_field() {
    local label=$1
    local value=$2
    local requirement=${3:-required}
    if [ -z "$value" ] || [ "$value" = "null" ]; then
        if [ "$requirement" = "optional" ]; then
            log_info "   $label not configured (optional)"
        else
            log_warning "   $label address not configured"
        fi
        return
    fi
    if [[ "$value" =~ ^0x[a-fA-F0-9]{40}$ ]]; then
        log_success "   $label address format is valid: $value"
    else
        log_error "   $label address format is invalid: $value"
        VALIDATION_STATUS=1
    fi
}

# Validate smart-accounts-config.json
CONFIG_FILE="$PROJECT_ROOT/config/smart-accounts-config.json"
log_info "1. Validating smart-accounts-config.json..."
if [ ! -f "$CONFIG_FILE" ]; then
    log_error "   Configuration file not found: $CONFIG_FILE"
    VALIDATION_STATUS=1
else
    # The file must parse as JSON before any field checks make sense.
    if ! jq empty "$CONFIG_FILE" 2>/dev/null; then
        log_error "   Invalid JSON format"
        VALIDATION_STATUS=1
    else
        log_success "   JSON format is valid"
        # chainId is required and must be 138 for this deployment.
        CHAIN_ID=$(jq -r '.chainId // empty' "$CONFIG_FILE")
        if [ -z "$CHAIN_ID" ] || [ "$CHAIN_ID" = "null" ]; then
            log_error "   Missing required field: chainId"
            VALIDATION_STATUS=1
        elif [ "$CHAIN_ID" != "138" ]; then
            log_warning "   ChainID is not 138 (found: $CHAIN_ID)"
        else
            log_success "   ChainID is correct: $CHAIN_ID"
        fi
        # rpcUrl is recommended but not fatal when missing.
        RPC_URL=$(jq -r '.rpcUrl // empty' "$CONFIG_FILE")
        if [ -z "$RPC_URL" ] || [ "$RPC_URL" = "null" ]; then
            log_warning "   Missing field: rpcUrl"
        else
            log_success "   RPC URL configured: $RPC_URL"
        fi
        # The three address fields share one validation routine.
        check_address_field "EntryPoint" "$(jq -r '.entryPointAddress // empty' "$CONFIG_FILE")" required
        check_address_field "AccountFactory" "$(jq -r '.accountFactoryAddress // empty' "$CONFIG_FILE")" required
        check_address_field "Paymaster" "$(jq -r '.paymasterAddress // empty' "$CONFIG_FILE")" optional
    fi
fi
#######################################
# Shared validator for the optional monitoring/analytics config files:
# a missing file is only a warning, malformed JSON is a hard failure.
# Globals:
#   VALIDATION_STATUS (written on invalid JSON)
# Arguments:
#   $1 - capitalized name used in messages, e.g. "Monitoring"
#   $2 - config file path
#######################################
check_optional_config() {
    local name=$1
    local path=$2
    if [ ! -f "$path" ]; then
        log_warning "   $name configuration file not found (optional)"
        return
    fi
    if ! jq empty "$path" 2>/dev/null; then
        log_error "   Invalid JSON format"
        VALIDATION_STATUS=1
    else
        log_success "   $name configuration is valid"
    fi
}

# Validate monitoring-config.json
MONITORING_CONFIG="$PROJECT_ROOT/config/monitoring-config.json"
log_info "2. Validating monitoring-config.json..."
check_optional_config "Monitoring" "$MONITORING_CONFIG"
# Validate analytics-config.json
ANALYTICS_CONFIG="$PROJECT_ROOT/config/analytics-config.json"
log_info "3. Validating analytics-config.json..."
check_optional_config "Analytics" "$ANALYTICS_CONFIG"
# Summary: exit non-zero when any hard failure was recorded above.
log_info "========================"
if (( VALIDATION_STATUS != 0 )); then
    log_error "Configuration validation failed! ❌"
    exit 1
fi
log_success "Configuration validation passed! ✅"
exit 0

159
scripts/verify-deployment.sh Executable file
View File

@@ -0,0 +1,159 @@
#!/bin/bash
# Smart Accounts Deployment Verification Script
# Verifies that all contracts are deployed and configured correctly
set -e
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging functions
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}
# Get script directory and sibling project locations.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# NOTE(review): if ../smom-dbis-138 does not exist, this substitution fails
# and `set -e` aborts the script here without a readable error message.
SMOM_DIR="$(cd "$PROJECT_ROOT/../smom-dbis-138" && pwd)"
log_info "Smart Accounts Deployment Verification"
log_info "======================================"
# cast (Foundry) performs the on-chain bytecode checks below.
if ! command -v cast &> /dev/null; then
    log_error "cast (Foundry) not found. Please install Foundry first."
    exit 1
fi
# Load environment variables (RPC_URL_138 is required).
if [ ! -f "$SMOM_DIR/.env" ]; then
    log_error ".env file not found in $SMOM_DIR"
    exit 1
fi
source "$SMOM_DIR/.env"
if [ -z "$RPC_URL_138" ]; then
    log_error "RPC_URL_138 not set in .env"
    exit 1
fi
# Load configuration containing the deployed contract addresses.
CONFIG_FILE="$PROJECT_ROOT/config/smart-accounts-config.json"
if [ ! -f "$CONFIG_FILE" ]; then
    log_error "Configuration file not found: $CONFIG_FILE"
    exit 1
fi
# jq extracts the address fields from the config.
if ! command -v jq &> /dev/null; then
    log_error "jq not found. Please install jq first."
    exit 1
fi
# Extract addresses from config ("" when the field is absent).
ENTRY_POINT=$(jq -r '.entryPointAddress // empty' "$CONFIG_FILE")
ACCOUNT_FACTORY=$(jq -r '.accountFactoryAddress // empty' "$CONFIG_FILE")
PAYMASTER=$(jq -r '.paymasterAddress // empty' "$CONFIG_FILE")
log_info "Verifying contracts on ChainID 138..."
log_info "RPC URL: $RPC_URL_138"
#######################################
# Check whether a configured contract address has deployed bytecode.
# Replaces three near-identical copies of the same `cast code` check.
# Globals:
#   RPC_URL_138 (read)
# Arguments:
#   $1 - display name, e.g. "EntryPoint"
#   $2 - contract address ("" or "null" => not configured)
#   $3 - "required" or "optional" (controls log severity when missing)
#######################################
check_contract() {
    local name=$1
    local addr=$2
    local requirement=${3:-required}
    local code
    if [ -z "$addr" ] || [ "$addr" = "null" ]; then
        if [ "$requirement" = "optional" ]; then
            log_info "$name not configured (optional)"
        else
            log_warning "$name address not configured"
        fi
        return
    fi
    log_info "Checking $name at $addr..."
    # `cast code` returns "0x" for an address with no deployed bytecode.
    code=$(cast code "$addr" --rpc-url "$RPC_URL_138" 2>/dev/null || echo "")
    if [ -n "$code" ] && [ "$code" != "0x" ]; then
        log_success "$name contract verified (has code)"
    elif [ "$requirement" = "optional" ]; then
        log_warning "$name contract not found or has no code (optional)"
    else
        log_error "$name contract not found or has no code"
    fi
}
# Verify each configured contract; Paymaster is optional.
check_contract "EntryPoint" "$ENTRY_POINT" required
check_contract "AccountFactory" "$ACCOUNT_FACTORY" required
check_contract "Paymaster" "$PAYMASTER" optional
# RPC connectivity: fetch the head block number as a liveness probe.
log_info "Checking RPC connectivity..."
BLOCK_NUMBER=$(cast block-number --rpc-url "$RPC_URL_138" 2>/dev/null || echo "")
if [ -z "$BLOCK_NUMBER" ]; then
    log_error "RPC connection failed"
    exit 1
fi
log_success "RPC connection successful (block: $BLOCK_NUMBER)"
# The configuration file must still parse as JSON.
log_info "Verifying configuration file..."
if ! jq empty "$CONFIG_FILE" 2>/dev/null; then
    log_error "Configuration file is invalid JSON"
    exit 1
fi
log_success "Configuration file is valid JSON"
# Summary
log_info "======================================"
log_info "Verification Summary:"
log_info "- RPC Connection: ✅"
log_info "- Configuration File: ✅"
# One formatter for the three per-contract status lines.
summary_line() {
    local label=$1
    local value=$2
    local suffix=${3:-}
    if [ -n "$value" ] && [ "$value" != "null" ] && [ "$value" != "" ]; then
        log_info "- $label: ✅ Configured$suffix"
    else
        log_info "- $label: ⚠️ Not configured$suffix"
    fi
}
summary_line "EntryPoint" "$ENTRY_POINT" ""
summary_line "AccountFactory" "$ACCOUNT_FACTORY" ""
summary_line "Paymaster" "$PAYMASTER" " (optional)"
log_success "Verification complete!"