mirror of
https://github.com/ElppaDev/snStatus.git
synced 2026-01-29 09:35:36 +00:00
786 lines
30 KiB
JavaScript
786 lines
30 KiB
JavaScript
import express from 'express';
|
|
import cors from 'cors';
|
|
import si from 'systeminformation';
|
|
import Docker from 'dockerode';
|
|
import { createServer } from 'http';
|
|
import { Server } from 'socket.io';
|
|
import fs from 'fs';
|
|
import path from 'path';
|
|
import webpush from 'web-push';
|
|
import crypto from 'crypto';
|
|
|
|
// Express application plus a plain Node HTTP server so the same port can
// serve both the REST API and the Socket.IO websocket endpoint.
const app = express();
const httpServer = createServer(app);

// Socket.IO server used for the interactive container terminal.
// CORS is wide open because the dashboard may be served from another origin.
const io = new Server(httpServer, {
  cors: {
    origin: "*",
    methods: ["GET", "POST"]
  }
});

const PORT = 8001;

// Docker Connection — talks to the host daemon through the bind-mounted unix socket.
const docker = new Docker({ socketPath: '/var/run/docker.sock' });

app.use(cors());
app.use(express.json());
|
|
|
|
// Helper: Read Synology Info from Host
// Reads the DSM version string and NAS model name from host files that are
// bind-mounted under /host. Returns generic fallbacks when the files are
// absent, and { model: 'Docker Container', ... } if reading them throws.
const getSynologyInfo = () => {
  const VERSION_FILE = '/host/etc/VERSION';
  const SYNOINFO_FILE = '/host/etc/synoinfo.conf';

  let model = 'Synology NAS';
  let dsmVersion = 'Unknown';

  try {
    if (fs.existsSync(VERSION_FILE)) {
      const raw = fs.readFileSync(VERSION_FILE, 'utf8');
      const product = raw.match(/productversion="([^"]+)"/);
      const build = raw.match(/buildnumber="([^"]+)"/);
      if (product && build) {
        dsmVersion = `DSM ${product[1]}-${build[1]}`;
      }
    }

    if (fs.existsSync(SYNOINFO_FILE)) {
      // synoinfo.conf usually carries the marketing name,
      // e.g. upnpmodelname="DS920+".
      const raw = fs.readFileSync(SYNOINFO_FILE, 'utf8');
      const found = raw.match(/upnpmodelname="([^"]+)"/);
      if (found) model = found[1];
    }

    return { model, dsmVersion };
  } catch (e) {
    console.error('Failed to read host files:', e);
    return { model: 'Docker Container', dsmVersion: 'Unknown' };
  }
};
|
|
|
|
// --- Docker Config & Security ---
// Persistent settings live in ./data/config.json beside the app.
const DATA_DIR = path.join(process.cwd(), 'data');
const CONFIG_FILE = path.join(DATA_DIR, 'config.json');

// Ensure data directory exists before anything tries to write into it.
if (!fs.existsSync(DATA_DIR)) {
  try { fs.mkdirSync(DATA_DIR, { recursive: true }); }
  catch (e) { console.error('Failed to create data directory:', e); }
}
|
|
|
|
// In-memory configuration, persisted to CONFIG_FILE by saveDockerConfig().
// These are the first-run defaults, overwritten by loadDockerConfig().
let dockerConfig = {
  enabled: false,              // gates all /api/docker routes
  passwordHash: null,          // scrypt hex digest (legacy configs: plain sha256)
  salt: null,                  // hex salt for scrypt; absent for legacy hashes
  retentionHours: 24,          // history retention, mirrored into HISTORY_LIMIT
  alertThresholds: { cpu: 80, memory: 80, disk: 90 },  // alert trigger percents
  containerAlertEnabled: false,
  alertCooldownSeconds: 300 // 5 minutes default
};
|
|
|
|
// Persist the current dockerConfig to CONFIG_FILE (pretty-printed, best effort).
const saveDockerConfig = () => {
  try {
    fs.writeFileSync(CONFIG_FILE, JSON.stringify(dockerConfig, null, 2));
  } catch (e) {
    console.error('Failed to save docker config:', e);
  }
};
|
|
|
|
// Load persisted config from CONFIG_FILE, falling back to (and writing out)
// the in-memory defaults when no file exists yet. Also restores the history
// retention window, so it MUST run after historyRetentionHours and
// HISTORY_LIMIT are declared (it is invoked near the end of the file).
const loadDockerConfig = () => {
  try {
    if (fs.existsSync(CONFIG_FILE)) {
      const data = fs.readFileSync(CONFIG_FILE, 'utf8');
      dockerConfig = JSON.parse(data);

      // Merge defaults for configs saved by older versions.
      // FIX: the key must be `memory` (not `mem`) — checkAlerts and the
      // /api/docker/config endpoint both read alertThresholds.memory, so the
      // old `mem` default left memory alerts comparing against undefined.
      if (!dockerConfig.alertThresholds) {
        dockerConfig.alertThresholds = { cpu: 90, memory: 90, disk: 90 };
      }

      // Restore history limit from the saved retention hours.
      if (dockerConfig.retentionHours) {
        historyRetentionHours = dockerConfig.retentionHours;
        HISTORY_LIMIT = historyRetentionHours * 60;
      }
    } else {
      saveDockerConfig();
    }
  } catch (e) { console.error('Failed to load docker config:', e); }
};
|
|
// NOTE: loadDockerConfig() mutates historyRetentionHours / HISTORY_LIMIT,
// which are declared further down, so it is invoked later in the file.
|
|
|
|
// --- Helper: Secure Hash ---
// Derives an scrypt digest of `password`. When `salt` is falsy a fresh
// 16-byte random salt is generated. Returns { hash, salt } as hex strings,
// or null for an empty password or on any crypto failure.
const hashPassword = (password, salt = null) => {
  if (!password) return null;
  try {
    const useSalt = salt || crypto.randomBytes(16).toString('hex');
    const digest = crypto.scryptSync(password, useSalt, 64).toString('hex');
    return { hash: digest, salt: useSalt };
  } catch (e) {
    console.error("Hashing error:", e);
    return null;
  }
};
|
|
|
|
// --- Push Notification System Variables ---
const SUBSCRIPTIONS_FILE = path.join(process.cwd(), 'data', 'subscriptions.json');
// Active web-push subscriptions (mirrored to SUBSCRIPTIONS_FILE).
let subscriptions = [];
// VAPID key pair used to sign push messages (persisted in data/vapid.json).
let vapidKeys = { publicKey: '', privateKey: '' };
// Timestamp (ms) of the last alert sent per metric; 0 means "not alerting".
let alertState = { cpu: 0, memory: 0, disk: 0 };
|
|
|
|
// Initialize web-push: ensure the data directory, load-or-generate the VAPID
// key pair, register it with web-push, and load persisted subscriptions
// (creating an empty file on first run).
// Consistency fix: paths are built with path.join on the shared DATA_DIR
// instead of ad-hoc `process.cwd() + '/data'` string concatenation; the
// resulting paths are identical.
const initPushNotifications = () => {
  const vapidFile = path.join(DATA_DIR, 'vapid.json');

  // Ensure data directory exists
  if (!fs.existsSync(DATA_DIR)) {
    fs.mkdirSync(DATA_DIR, { recursive: true });
  }

  // Initialize VAPID keys: generate once and persist, reload thereafter.
  if (!fs.existsSync(vapidFile)) {
    vapidKeys = webpush.generateVAPIDKeys();
    fs.writeFileSync(vapidFile, JSON.stringify(vapidKeys));
  } else {
    vapidKeys = JSON.parse(fs.readFileSync(vapidFile, 'utf8'));
  }
  webpush.setVapidDetails('mailto:admin@example.com', vapidKeys.publicKey, vapidKeys.privateKey);

  // Initialize subscriptions - CREATE FILE IF MISSING
  if (fs.existsSync(SUBSCRIPTIONS_FILE)) {
    try {
      subscriptions = JSON.parse(fs.readFileSync(SUBSCRIPTIONS_FILE, 'utf8'));
      console.log('[PUSH] Loaded', subscriptions.length, 'existing subscriptions');
    } catch (e) {
      console.error('[PUSH] Error loading subscriptions:', e);
      subscriptions = [];
    }
  } else {
    console.log('[PUSH] Creating new subscriptions file');
    subscriptions = [];
    fs.writeFileSync(SUBSCRIPTIONS_FILE, JSON.stringify(subscriptions));
  }
};
|
|
|
|
// Best-effort persistence of the current subscription list.
const saveSubscriptions = () => {
  try {
    fs.writeFileSync(SUBSCRIPTIONS_FILE, JSON.stringify(subscriptions));
  } catch (e) {
    console.error('Failed to save subscriptions:', e);
  }
};
|
|
|
|
// Broadcast `payload` to every stored web-push subscription.
// Expired endpoints (HTTP 410 Gone / 404) are pruned from the list.
// FIX: pruning is now done by endpoint rather than `splice(index, 1)`. The
// sends resolve asynchronously, so after one removal every later captured
// index pointed one slot too far and could delete the wrong (live)
// subscription.
const sendNotification = (payload) => {
  const notificationPayload = JSON.stringify(payload);
  console.log('[PUSH] Sending notification to', subscriptions.length, 'subscribers');
  console.log('[PUSH] Payload:', payload);

  if (subscriptions.length === 0) {
    console.warn('[PUSH] No subscriptions available!');
    return;
  }

  subscriptions.forEach((subscription, index) => {
    console.log(`[PUSH] Sending to subscription ${index + 1}/${subscriptions.length}`);
    webpush.sendNotification(subscription, notificationPayload)
      .then(() => {
        console.log(`[PUSH] Successfully sent to subscription ${index + 1}`);
      })
      .catch(error => {
        console.error(`[PUSH] Error sending to subscription ${index + 1}:`, error);
        if (error.statusCode === 410 || error.statusCode === 404) {
          console.log('[PUSH] Removing expired subscription');
          // Remove by identity (endpoint), immune to index drift.
          subscriptions = subscriptions.filter(sub => sub.endpoint !== subscription.endpoint);
          saveSubscriptions();
        }
      });
  });
};
|
|
|
|
// Evaluate the latest sample against the configured thresholds and push an
// alert when a metric exceeds its limit, at most once per cooldown window.
// Dropping back under the threshold resets the cooldown for that metric.
const checkAlerts = (stats) => {
  const now = Date.now();
  const thresholds = dockerConfig.alertThresholds || { cpu: 90, memory: 90, disk: 90 };
  const COOLDOWN = (dockerConfig.alertCooldownSeconds || 300) * 1000; // seconds -> ms

  // Shared cooldown logic for one metric. `label` feeds the server log,
  // `message` is the (Korean) notification body shown to the user.
  const evaluate = (metric, value, label, message) => {
    if (value > thresholds[metric]) {
      if (now - alertState[metric] > COOLDOWN) {
        console.log(`[ALERT] ${label} alert sent:`, value + '%');
        sendNotification({ title: 'snStatus 알림', body: message });
        alertState[metric] = now;
      }
    } else {
      alertState[metric] = 0;
    }
  };

  evaluate('cpu', stats.cpu, 'CPU', `CPU 사용률 높음: ${stats.cpu}%`);
  evaluate('memory', stats.memory, 'Memory', `메모리 사용률 높음: ${stats.memory}%`);
};
|
|
|
|
// VAPID Public Key Endpoint
// The browser needs this key to create a push subscription; responds 500 if
// the keys were never initialized.
app.get('/api/notifications/vapidPublicKey', (req, res) => {
  try {
    const publicKey = vapidKeys?.publicKey;
    if (!publicKey) {
      return res.status(500).json({ error: 'VAPID keys not initialized' });
    }
    res.json({ publicKey });
  } catch (error) {
    console.error('Error getting VAPID key:', error);
    res.status(500).json({ error: 'Failed to get VAPID key' });
  }
});
|
|
|
|
// Subscribe to push notifications. Any prior subscription sharing the same
// endpoint is replaced, then the list is persisted.
app.post('/api/notifications/subscribe', async (req, res) => {
  const subscription = req.body;
  if (!subscription?.endpoint) {
    return res.status(400).json({ error: 'Invalid subscription' });
  }

  // De-duplicate by endpoint before appending the fresh subscription.
  const others = subscriptions.filter(sub => sub.endpoint !== subscription.endpoint);
  subscriptions = [...others, subscription];
  saveSubscriptions();

  console.log('Push subscription added:', subscription.endpoint);
  res.status(201).json({ message: 'Subscription added' });
});
|
|
|
|
// Unsubscribe from push notifications (matched by endpoint).
app.post('/api/notifications/unsubscribe', async (req, res) => {
  const { endpoint } = req.body;
  subscriptions = subscriptions.filter(sub => sub.endpoint !== endpoint);
  saveSubscriptions();
  console.log('Push subscription removed:', endpoint);
  res.json({ message: 'Unsubscribed' });
});
|
|
|
|
// --- System Info APIs ---
// Global state for network speed calculation: the last raw byte counters per
// interface and the time they were sampled, used to derive bytes/sec deltas.
let previousNetStats = {};
let previousTime = Date.now();
|
|
|
|
// Read per-interface byte counters from the host.
// Uses /host/sys/class/net (host sysfs bind-mounted into the container) to
// bypass the container's own network namespace and see the real NICs.
// Returns [{ iface, rx_sec, tx_sec }]; rates are computed against the
// previous sample, so the first call (no baseline) reports 0 for all rates.
const getHostNetworkStats = () => {
  try {
    const netDir = '/host/sys/class/net';

    let interfaces = [];
    try {
      if (fs.existsSync(netDir)) {
        interfaces = fs.readdirSync(netDir);
      } else {
        console.warn("Host sysfs not found at /host/sys/class/net");
        return [];
      }
    } catch (e) { return []; }

    const timestamp = Date.now();
    const currentStats = {};
    const timeDiff = (timestamp - previousTime) / 1000;
    const result = [];

    // Virtual/overlay interfaces that should not count toward host traffic.
    const isVirtual = (name) =>
      name === 'lo' ||
      ['veth', 'docker', 'br-', 'cali', 'flannel', 'cni', 'sit', 'tun', 'tap']
        .some(prefix => name.startsWith(prefix));

    for (const iface of interfaces) {
      if (isVirtual(iface)) continue;

      try {
        const rxPath = path.join(netDir, iface, 'statistics', 'rx_bytes');
        const txPath = path.join(netDir, iface, 'statistics', 'tx_bytes');

        if (!fs.existsSync(rxPath) || !fs.existsSync(txPath)) continue;

        // FIX: always pass a radix — sysfs counters are plain base-10 integers.
        const rxBytes = Number.parseInt(fs.readFileSync(rxPath, 'utf8').trim(), 10);
        const txBytes = Number.parseInt(fs.readFileSync(txPath, 'utf8').trim(), 10);

        currentStats[iface] = { rx: rxBytes, tx: txBytes };

        let rx_sec = 0;
        let tx_sec = 0;

        if (previousNetStats[iface] && timeDiff > 0) {
          const prevRx = previousNetStats[iface].rx;
          const prevTx = previousNetStats[iface].tx;

          // Simple difference; a counter reset (new < old) just reports 0 for
          // this sample instead of a bogus negative rate.
          if (rxBytes >= prevRx) rx_sec = (rxBytes - prevRx) / timeDiff;
          if (txBytes >= prevTx) tx_sec = (txBytes - prevTx) / timeDiff;
        }

        result.push({ iface: iface, rx_sec: rx_sec, tx_sec: tx_sec });
      } catch (err) {
        continue; // unreadable interface: skip it, keep the rest
      }
    }

    // Save this sample as the baseline for the next call.
    previousNetStats = currentStats;
    previousTime = timestamp;

    return result;
  } catch (error) {
    console.error('Failed to read host network stats from /sys:', error.message);
    return [];
  }
};
|
|
|
|
// GET /api/stats — one-shot snapshot of CPU, memory, storage, network and
// host identity for the dashboard.
app.get('/api/stats', async (req, res) => {
  try {
    // Collect all systeminformation queries in parallel.
    const [cpu, mem, fsData, osInfo, system] = await Promise.all([
      si.currentLoad(),
      si.mem(),
      si.fsSize(),
      si.osInfo(),
      si.system()
    ]);

    const network = getHostNetworkStats(); // host NIC rates (custom reader)
    const hostInfo = getSynologyInfo();    // DSM version + model from /host

    // Keep only real Synology volumes (/volumeN); drop Docker overlays,
    // tmpfs, @-snapshot paths and other container noise.
    const isVolumeMount = (disk) => {
      let mountPoint = disk.mount;
      if (mountPoint.startsWith('/host')) mountPoint = mountPoint.substring(5);
      const noisy =
        mountPoint.includes('@') || mountPoint.includes('docker') ||
        mountPoint.includes('container') || mountPoint.includes('appdata') ||
        mountPoint.includes('tmp') ||
        disk.fs.startsWith('overlay') || disk.fs.startsWith('tmpfs') ||
        disk.fs.startsWith('shm');
      return !noisy && /^\/volume\d+$/.test(mountPoint);
    };

    const payload = {
      cpu: {
        load: Math.round(cpu.currentLoad),
        cores: cpu.cpus.map(c => Math.round(c.load)),
      },
      memory: {
        total: mem.total,
        used: mem.used,
        // "active" excludes cache/buffers, matching what DSM itself reports.
        percentage: Math.round((mem.active / mem.total) * 100),
      },
      storage: fsData.filter(isVolumeMount).map(disk => ({
        fs: disk.fs,
        type: disk.type,
        size: disk.size,
        used: disk.used,
        use: Math.round(disk.use),
        mount: disk.mount,
      })),
      network: network, // Use custom stats
      system: {
        model: hostInfo.model !== 'Docker Container' ? hostInfo.model : system.model, // Prefer host info
        manufacturer: system.manufacturer,
        os: 'DSM', // Hardcode or derive
        release: hostInfo.dsmVersion,
        kernel: osInfo.kernel,
        hostname: osInfo.hostname,
        uptime: osInfo.uptime
      }
    };

    res.json(payload);
  } catch (error) {
    console.error('Error fetching system stats:', error);
    res.status(500).json({ error: 'Failed to retrieve system statistics' });
  }
});
|
|
|
|
// --- History Data ---
// Rolling per-minute samples persisted to data/history.json.
const HISTORY_FILE = path.join(process.cwd(), 'data', 'history.json');
let historyRetentionHours = 24; // Default 24 hours
let HISTORY_LIMIT = historyRetentionHours * 60; // points (1 min interval)
// In-memory ring of { timestamp, cpu, memory, network } points, oldest first.
let historyData = [];
|
|
|
|
// Flush the in-memory history ring to disk (best effort).
const saveHistory = () => {
  try {
    fs.writeFileSync(HISTORY_FILE, JSON.stringify(historyData));
  } catch (e) {
    console.error('Failed to save history:', e);
  }
};
|
|
|
|
// Load persisted history on startup, trimming it to the current limit in
// case the retention setting shrank while the server was offline.
const loadHistory = () => {
  if (!fs.existsSync(HISTORY_FILE)) return;
  try {
    historyData = JSON.parse(fs.readFileSync(HISTORY_FILE, 'utf8'));
    if (historyData.length > HISTORY_LIMIT) {
      historyData = historyData.slice(-HISTORY_LIMIT);
    }
  } catch (e) {
    console.error('Failed to load history:', e);
  }
};
loadHistory();
|
|
|
|
// Retention Settings API
// Returns the current history retention window in hours.
app.get('/api/settings/retention', (req, res) => {
  res.json({ hours: historyRetentionHours });
});
|
|
|
|
// Update the history retention window (hours), trim the ring to the new
// limit, and persist the preference.
// FIX: `hours` is parsed before validation. The old check
// (`!hours || hours < 1`) let non-numeric strings through — "abc" is truthy
// and `"abc" < 1` is false — which turned HISTORY_LIMIT into NaN and silently
// disabled all history trimming.
app.post('/api/settings/retention', (req, res) => {
  const { hours } = req.body;
  const parsedHours = Number.parseInt(hours, 10);
  if (!Number.isInteger(parsedHours) || parsedHours < 1) {
    return res.status(400).json({ error: 'Invalid retention period' });
  }
  historyRetentionHours = parsedHours;
  HISTORY_LIMIT = historyRetentionHours * 60;

  // Trim existing data if needed
  if (historyData.length > HISTORY_LIMIT) {
    historyData = historyData.slice(historyData.length - HISTORY_LIMIT);
  }
  saveHistory(); // Save truncated data

  // Save preference to config
  dockerConfig.retentionHours = historyRetentionHours;
  saveDockerConfig();

  console.log(`History retention updated to ${historyRetentionHours} hours (${HISTORY_LIMIT} points)`);
  res.json({ message: 'Retention updated', hours: historyRetentionHours });
});
|
|
|
|
// Sample CPU / memory / network once, append a point to the history ring,
// persist it, and run the alert checks against the new point.
const updateHistory = async () => {
  try {
    const [cpu, mem] = await Promise.all([si.currentLoad(), si.mem()]);

    // Aggregate the per-interface rates into one host-wide rx/tx pair.
    // Rates come from the shared getHostNetworkStats baseline, i.e. the
    // average speed since the previous poll (UI or this loop).
    const netStats = getHostNetworkStats();
    const totals = netStats.reduce(
      (acc, net) => ({ rx: acc.rx + net.rx_sec, tx: acc.tx + net.tx_sec }),
      { rx: 0, tx: 0 }
    );

    const point = {
      timestamp: Date.now(),
      cpu: Math.round(cpu.currentLoad),
      memory: Math.round((mem.active / mem.total) * 100),
      network: { rx: totals.rx, tx: totals.tx }
    };

    historyData.push(point);
    // Drop the oldest point once the retention window is full.
    if (historyData.length > HISTORY_LIMIT) {
      historyData.shift();
    }

    saveHistory();
    checkAlerts(point);
  } catch (e) {
    console.error("Failed to update history:", e);
  }
};
|
|
|
|
// Update history every 1 minute
setInterval(updateHistory, 60 * 1000);
// Initial run, so the first data point exists immediately at startup.
updateHistory();
|
|
|
|
// Returns the full in-memory history ring (oldest first).
app.get('/api/history', (req, res) => {
  res.json(historyData);
});
|
|
|
|
|
|
// Load the persisted docker/alert config. This call sits here — after
// historyRetentionHours and HISTORY_LIMIT are declared above — because
// loadDockerConfig() restores the saved retention window into both.
loadDockerConfig(); // Initial Load
|
|
|
|
// Docker Config API
// Public, non-secret view of the docker settings (never exposes the
// password hash or salt).
app.get('/api/docker/config', (req, res) => {
  const {
    enabled,
    alertThresholds,
    containerAlertEnabled,
    alertCooldownSeconds
  } = dockerConfig;

  res.json({
    enabled,
    alertThresholds: alertThresholds || { cpu: 90, memory: 90, disk: 90 },
    containerAlertEnabled: containerAlertEnabled !== undefined ? containerAlertEnabled : true,
    alertCooldownSeconds: alertCooldownSeconds || 300
  });
});
|
|
|
|
// Enable/disable the Docker feature set.
// - Disabling requires the current password, verified against either the
//   legacy unsalted SHA-256 hash or the modern salted scrypt hash (the
//   presence of a stored salt decides which).
// - Enabling for the first time requires a password; supplying a password
//   while enabling rotates the stored hash + salt.
app.post('/api/docker/config', (req, res) => {
  const { enabled, password } = req.body;
  const isEnabling = !!enabled;

  // Case 1: Disabling (Require Verification)
  if (!isEnabling && dockerConfig.enabled) {
    if (!password) return res.status(400).json({ error: 'Password is required to disable Docker features.' });

    if (dockerConfig.passwordHash) {
      let match = false;

      if (!dockerConfig.salt) {
        // Legacy SHA256 Check (configs created before salted scrypt existed)
        const legacyHash = crypto.createHash('sha256').update(password).digest('hex');
        match = (legacyHash === dockerConfig.passwordHash);
      } else {
        // Modern Scrypt Check — constant-time comparison of the digests.
        // timingSafeEqual throws on buffer-length mismatch, hence the try.
        try {
          const { hash } = hashPassword(password, dockerConfig.salt);
          match = crypto.timingSafeEqual(
            Buffer.from(hash, 'hex'),
            Buffer.from(dockerConfig.passwordHash, 'hex')
          );
        } catch (e) {
          return res.status(500).json({ error: 'Verification error.' });
        }
      }

      if (!match) return res.status(401).json({ error: '패스워드가 올바르지 않습니다.' });
    }
  }

  // Case 2: Enabling (Require Password if no existing hash)
  if (isEnabling && !password && !dockerConfig.passwordHash) {
    return res.status(400).json({ error: 'Password is required to enable Docker features.' });
  }

  dockerConfig.enabled = isEnabling;

  // If Enabling and password provided, update/set password
  if (isEnabling && password) {
    const result = hashPassword(password);
    if (result) {
      dockerConfig.passwordHash = result.hash;
      dockerConfig.salt = result.salt;
    }
  }

  saveDockerConfig();
  console.log(`Docker feature ${dockerConfig.enabled ? 'Enabled' : 'Disabled'}`);
  res.json({ message: 'Configuration saved', enabled: dockerConfig.enabled });
});
|
|
|
|
// Verify Password API
// Checks a submitted password against the stored scrypt hash. Responses use
// { success } plus an optional error string; legacy (unsalted) configs are
// rejected with a 500 asking for a reset.
app.post('/api/docker/verify', (req, res) => {
  const { password } = req.body;
  if (!password) return res.status(400).json({ success: false });

  // Both hash and salt must exist for scrypt verification.
  const { passwordHash, salt } = dockerConfig;
  if (!passwordHash || !salt) {
    return res.status(500).json({ success: false, error: 'Security configuration not found. Please reset in Settings.' });
  }

  const { hash } = hashPassword(password, salt);

  try {
    // Constant-time comparison to prevent timing attacks. timingSafeEqual
    // throws on length mismatch, which we treat as a failed verification.
    const candidate = Buffer.from(hash, 'hex');
    const stored = Buffer.from(passwordHash, 'hex');
    if (crypto.timingSafeEqual(candidate, stored)) {
      return res.json({ success: true });
    }
    res.status(401).json({ success: false, error: '패스워드가 올바르지 않습니다.' });
  } catch (e) {
    res.status(401).json({ success: false, error: 'Verification failed' });
  }
});
|
|
|
|
// --- Docker APIs ---
// Helper: Calculate CPU Percent from Docker Stats
// Mirrors the `docker stats` formula: delta of container CPU time over delta
// of system CPU time, scaled by the number of online CPUs. Returns 0 when
// the sample is incomplete.
// FIX: guard the full nested path — Docker's first stats sample ships an
// empty precpu_stats object, so `precpu_stats.cpu_usage.total_usage`
// previously threw a TypeError instead of returning 0.
const calculateCPUPercent = (stats) => {
  const cur = stats?.cpu_stats;
  const prev = stats?.precpu_stats;
  if (!cur?.cpu_usage || !prev?.cpu_usage) return 0;

  const cpuDelta = cur.cpu_usage.total_usage - prev.cpu_usage.total_usage;
  const systemCpuDelta = cur.system_cpu_usage - prev.system_cpu_usage;
  const numberCpus = cur.online_cpus || (cur.cpu_usage.percpu_usage ? cur.cpu_usage.percpu_usage.length : 1);

  if (systemCpuDelta > 0 && cpuDelta > 0) {
    return (cpuDelta / systemCpuDelta) * numberCpus * 100.0;
  }
  return 0;
};
|
|
|
|
// --- Docker APIs ---
// List all containers (running or not); running ones are enriched with a
// one-shot CPU/memory stats sample. Gated behind the enabled flag.
app.get('/api/docker/containers', async (req, res) => {
  if (!dockerConfig.enabled) {
    return res.status(403).json({ error: 'Docker features are disabled.' });
  }

  // One stats fetch per running container; failures degrade to stats: null.
  const withStats = async (containerInfo) => {
    if (containerInfo.State !== 'running') {
      return { ...containerInfo, stats: null };
    }
    try {
      const stats = await docker.getContainer(containerInfo.Id).stats({ stream: false });
      const memoryUsage = stats.memory_stats.usage || 0;
      const memoryLimit = stats.memory_stats.limit || 0;
      return {
        ...containerInfo,
        stats: {
          cpu: calculateCPUPercent(stats),
          memory: memoryUsage,
          memoryLimit: memoryLimit,
          memoryPercent: memoryLimit > 0 ? (memoryUsage / memoryLimit) * 100 : 0
        }
      };
    } catch (e) {
      console.error(`Failed to fetch stats for ${containerInfo.Names[0]}:`, e.message);
      return { ...containerInfo, stats: null };
    }
  };

  try {
    const containers = await docker.listContainers({ all: true });
    const containersWithStats = await Promise.all(containers.map(withStats));
    res.json(containersWithStats);
  } catch (error) {
    console.error('Error fetching containers:', error);
    res.status(500).json({ error: 'Failed to fetch containers. Is Docker Sock mounted?' });
  }
});
|
|
|
|
// Container Control APIs
// Start/stop/restart a container.
// FIX: this route now enforces the same `dockerConfig.enabled` gate as
// /api/docker/containers — previously control actions were accepted even
// while docker features were disabled.
app.post('/api/docker/containers/:id/:action', async (req, res) => {
  const { id, action } = req.params;

  if (!dockerConfig.enabled) {
    return res.status(403).json({ error: 'Docker features are disabled.' });
  }

  try {
    const container = docker.getContainer(id);
    let data;
    switch (action) {
      case 'start': data = await container.start(); break;
      case 'stop': data = await container.stop(); break;
      case 'restart': data = await container.restart(); break;
      default: return res.status(400).json({ error: 'Invalid action' });
    }
    res.json({ message: `Container ${action} successful`, data });
  } catch (error) {
    console.error(`Error performing ${action} on container ${id}:`, error);
    res.status(500).json({ error: `Failed to ${action} container: ${error.message}` });
  }
});
|
|
|
|
// Save Alert Thresholds
// Persists alert thresholds plus the optional container-alert toggle and
// cooldown (clamped to 10..3600 seconds).
app.post('/api/settings/thresholds', (req, res) => {
  const { thresholds, containerAlertEnabled, alertCooldownSeconds } = req.body;

  if (!thresholds) {
    return res.status(400).json({ error: 'Invalid thresholds' });
  }

  dockerConfig.alertThresholds = thresholds;
  if (containerAlertEnabled !== undefined) {
    dockerConfig.containerAlertEnabled = containerAlertEnabled;
  }
  if (alertCooldownSeconds !== undefined) {
    dockerConfig.alertCooldownSeconds = Math.max(10, Math.min(3600, alertCooldownSeconds));
  }

  saveDockerConfig();
  res.json({ success: true });
});
|
|
|
|
// Container Logs API
// Last 100 timestamped stdout/stderr lines for one container.
// FIX: now gated on the `dockerConfig.enabled` flag for consistency with the
// other docker routes — logs were previously readable with the feature off.
app.get('/api/docker/containers/:id/logs', async (req, res) => {
  const { id } = req.params;

  if (!dockerConfig.enabled) {
    return res.status(403).json({ error: 'Docker features are disabled.' });
  }

  try {
    const container = docker.getContainer(id);
    const logs = await container.logs({ stdout: true, stderr: true, tail: 100, timestamps: true });
    res.send(logs.toString('utf8'));
  } catch (error) {
    console.error('Error fetching logs:', error);
    res.status(500).json({ error: 'Failed to fetch logs' });
  }
});
|
|
|
|
|
|
// Listen on the Docker event stream and push a notification whenever a
// container stops or dies. A crash cannot reliably be distinguished from a
// manual stop without extra state tracking, so every stop is reported; both
// toggles (enabled / containerAlertEnabled) are checked per event.
// FIX: the event stream is newline-delimited JSON and one chunk can carry
// several events — JSON.parse on the whole chunk threw in that case and
// silently dropped every event in it. Each line is now parsed independently.
const monitorDockerEvents = () => {
  docker.getEvents((err, stream) => {
    if (err) return console.error('Error getting docker events:', err);

    stream.on('data', chunk => {
      if (!dockerConfig.enabled) return; // Ignore if disabled
      if (!dockerConfig.containerAlertEnabled) return; // Ignore if container alerts disabled

      const lines = chunk.toString().split('\n').filter(line => line.trim().length > 0);
      for (const line of lines) {
        try {
          const event = JSON.parse(line);
          // Filter for container stop/die
          if (event.Type === 'container' && (event.Action === 'die' || event.Action === 'stop')) {
            const containerName = event.Actor?.Attributes?.name || event.id.substring(0, 12);
            sendNotification({
              title: 'snStatus 알림',
              body: `컨테이너 종료: ${containerName}`
            });
          }
        } catch (e) {
          console.error('Error parsing docker event:', e);
        }
      }
    });
  });
};
|
|
|
|
// Initialize web-push (VAPID keys + stored subscriptions) before any alert
// can fire.
initPushNotifications();

// Attach the Docker event stream exactly once.
// FIX: the previous code called monitorDockerEvents() twice when
// dockerConfig.enabled was true (once conditionally, once unconditionally),
// attaching duplicate streams and sending every container alert twice.
// Per-event gating on the enabled flags happens inside the handler, so a
// single unconditional call is correct.
monitorDockerEvents();
|
|
|
|
|
|
// --- WebSocket for Terminal & Logs ---
// Each socket can attach an interactive shell to one container at a time.
io.on('connection', (socket) => {
  // Duplex stream to the container's exec'd shell (null until attached).
  let stream = null;
  socket.on('attach-terminal', async (containerId) => {
    try {
      const container = docker.getContainer(containerId);
      // Allocate a TTY running /bin/sh with stdin/stdout/stderr attached.
      const exec = await container.exec({
        AttachStdin: true, AttachStdout: true, AttachStderr: true, Tty: true, Cmd: ['/bin/sh']
      });
      // hijack + stdin yields a bidirectional stream we can write keystrokes to.
      stream = await exec.start({ hijack: true, stdin: true });
      stream.on('data', (chunk) => socket.emit('terminal-output', chunk.toString('utf8')));
      socket.on('terminal-input', (data) => { if (stream) stream.write(data); });
      // Resize failures are ignored — the exec may already have exited.
      socket.on('resize-terminal', ({ cols, rows }) => { if (exec) exec.resize({ w: cols, h: rows }).catch(() => { }); });
    } catch (err) {
      console.error('Error attaching to container:', err);
      socket.emit('terminal-output', '\r\nError: Failed to attach to container. ' + err.message + '\r\n');
    }
  });

  // Close the exec stream when the browser disconnects.
  socket.on('disconnect', () => { if (stream) stream.end(); });
});
|
|
|
|
// Bind on all interfaces so the container port mapping works.
httpServer.listen(PORT, '0.0.0.0', () => {
  console.log(`Server running on port ${PORT}`);
});
|