v1.10.0 #471

Merged
LukeGus merged 106 commits from dev-1.10.0 into main 2026-01-01 04:20:12 +00:00
14 changed files with 494 additions and 131 deletions
Showing only changes of commit 28729e3de2 - Show all commits

View File

@@ -85,6 +85,15 @@ router.post(
return res.status(403).json({ error: "Not host owner" });
}
// Check if host uses credentials (required for sharing)
if (!host[0].credentialId) {
return res.status(400).json({
error:
"Only hosts using credentials can be shared. Please create a credential and assign it to this host before sharing.",
code: "CREDENTIAL_REQUIRED_FOR_SHARING",
});
}
// Verify target exists (user or role)
if (targetType === "user") {
const targetUser = await db

View File

@@ -31,6 +31,7 @@ import multer from "multer";
import { sshLogger } from "../../utils/logger.js";
import { SimpleDBOps } from "../../utils/simple-db-ops.js";
import { AuthManager } from "../../utils/auth-manager.js";
import { PermissionManager } from "../../utils/permission-manager.js";
import { DataCrypto } from "../../utils/data-crypto.js";
import { SystemCrypto } from "../../utils/system-crypto.js";
import { DatabaseSaveTrigger } from "../db/index.js";
@@ -48,6 +49,7 @@ function isValidPort(port: unknown): port is number {
}
const authManager = AuthManager.getInstance();
const permissionManager = PermissionManager.getInstance();
const authenticateJWT = authManager.createAuthMiddleware();
const requireDataAccess = authManager.createDataAccessMiddleware();
@@ -601,23 +603,55 @@ router.put(
}
try {
// Check if user can update this host (owner or manage permission)
const accessInfo = await permissionManager.canAccessHost(
userId,
Number(hostId),
"write",
);
if (!accessInfo.hasAccess) {
sshLogger.warn("User does not have permission to update host", {
operation: "host_update",
hostId: parseInt(hostId),
userId,
});
return res.status(403).json({ error: "Access denied" });
}
// Get the actual owner ID for the update
const hostRecord = await db
.select({ userId: sshData.userId })
.from(sshData)
.where(eq(sshData.id, Number(hostId)))
.limit(1);
if (hostRecord.length === 0) {
sshLogger.warn("Host not found for update", {
operation: "host_update",
hostId: parseInt(hostId),
userId,
});
return res.status(404).json({ error: "Host not found" });
}
const ownerId = hostRecord[0].userId;
await SimpleDBOps.update(
sshData,
"ssh_data",
and(eq(sshData.id, Number(hostId)), eq(sshData.userId, userId)),
eq(sshData.id, Number(hostId)),
sshDataObj,
userId,
ownerId,
);
const updatedHosts = await SimpleDBOps.select(
db
.select()
.from(sshData)
.where(
and(eq(sshData.id, Number(hostId)), eq(sshData.userId, userId)),
),
.where(eq(sshData.id, Number(hostId))),
"ssh_data",
userId,
ownerId,
);
if (updatedHosts.length === 0) {
@@ -780,6 +814,7 @@ router.get(
socks5ProxyChain: sshData.socks5ProxyChain,
// Shared access info
ownerId: sshData.userId,
isShared: sql<boolean>`${hostAccess.id} IS NOT NULL`,
permissionLevel: hostAccess.permissionLevel,
expiresAt: hostAccess.expiresAt,
@@ -1611,32 +1646,9 @@ async function resolveHostCredentials(
host: Record<string, unknown>,
): Promise<Record<string, unknown>> {
try {
// Skip credential resolution for shared hosts
// Shared users cannot access the owner's encrypted credentials
if (host.isShared && host.credentialId) {
sshLogger.info(
`Skipping credential resolution for shared host ${host.id} with credentialId ${host.credentialId}`,
{
operation: "resolve_host_credentials_shared",
hostId: host.id as number,
isShared: host.isShared,
},
);
// Return host without resolving credentials
// The frontend should handle credential auth for shared hosts differently
const result = { ...host };
if (host.key_password !== undefined) {
if (result.keyPassword === undefined) {
result.keyPassword = host.key_password;
}
delete result.key_password;
}
return result;
}
if (host.credentialId && host.userId) {
if (host.credentialId && (host.userId || host.ownerId)) {
const credentialId = host.credentialId as number;
const userId = host.userId as string;
const ownerId = (host.ownerId || host.userId) as string;
const credentials = await SimpleDBOps.select(
db
@@ -1645,11 +1657,11 @@ async function resolveHostCredentials(
.where(
and(
eq(sshCredentials.id, credentialId),
eq(sshCredentials.userId, userId),
eq(sshCredentials.userId, ownerId),
),
),
"ssh_credentials",
userId,
ownerId,
);
if (credentials.length > 0) {

View File

@@ -201,11 +201,12 @@ class SSHConnectionPool {
private cleanupInterval: NodeJS.Timeout;
constructor() {
// Reduce cleanup interval from 5 minutes to 2 minutes for faster dead connection removal
this.cleanupInterval = setInterval(
() => {
this.cleanup();
},
5 * 60 * 1000,
2 * 60 * 1000,
);
}
@@ -218,9 +219,22 @@ class SSHConnectionPool {
return `${host.ip}:${host.port}:${host.username}${socks5Key}`;
}
private isConnectionHealthy(client: Client): boolean {
  try {
    // The public ssh2 API exposes no liveness flag, so probe the underlying
    // socket: a destroyed or no-longer-writable socket means the connection
    // is dead and must not be handed back out of the pool.
    // @ts-ignore - `_sock` is an internal ssh2 property, absent from the typings
    const sock = client._sock;
    const dead = Boolean(sock) && (sock.destroyed || !sock.writable);
    return !dead;
  } catch {
    // If probing internals throws for any reason, err on the side of
    // treating the connection as unhealthy.
    return false;
  }
}
async getConnection(host: SSHHostWithCredentials): Promise<Client> {
const hostKey = this.getHostKey(host);
const connections = this.connections.get(hostKey) || [];
let connections = this.connections.get(hostKey) || [];
statsLogger.info("Getting connection from pool", {
operation: "get_connection_from_pool",
@@ -228,19 +242,39 @@ class SSHConnectionPool {
availableConnections: connections.length,
useSocks5: host.useSocks5,
socks5Host: host.socks5Host,
hasSocks5ProxyChain: !!(host.socks5ProxyChain && host.socks5ProxyChain.length > 0),
hasSocks5ProxyChain: !!(
host.socks5ProxyChain && host.socks5ProxyChain.length > 0
),
hostId: host.id,
});
// Find available connection and validate health
const available = connections.find((conn) => !conn.inUse);
if (available) {
statsLogger.info("Reusing existing connection from pool", {
operation: "reuse_connection",
hostKey,
});
available.inUse = true;
available.lastUsed = Date.now();
return available.client;
// Health check before reuse
if (!this.isConnectionHealthy(available.client)) {
statsLogger.warn("Removing unhealthy connection from pool", {
operation: "remove_dead_connection",
hostKey,
});
// Remove dead connection
try {
available.client.end();
} catch (error) {
// Ignore cleanup errors
}
connections = connections.filter((c) => c !== available);
this.connections.set(hostKey, connections);
// Fall through to create new connection
} else {
statsLogger.info("Reusing existing connection from pool", {
operation: "reuse_connection",
hostKey,
});
available.inUse = true;
available.lastUsed = Date.now();
return available.client;
}
}
if (connections.length < this.maxConnectionsPerHost) {
@@ -338,7 +372,8 @@ class SSHConnectionPool {
// Check if SOCKS5 proxy is enabled (either single proxy or chain)
if (
host.useSocks5 &&
(host.socks5Host || (host.socks5ProxyChain && host.socks5ProxyChain.length > 0))
(host.socks5Host ||
(host.socks5ProxyChain && host.socks5ProxyChain.length > 0))
) {
statsLogger.info("Using SOCKS5 proxy for connection", {
operation: "socks5_enabled",
@@ -346,7 +381,9 @@ class SSHConnectionPool {
hostPort: host.port,
socks5Host: host.socks5Host,
socks5Port: host.socks5Port,
hasChain: !!(host.socks5ProxyChain && host.socks5ProxyChain.length > 0),
hasChain: !!(
host.socks5ProxyChain && host.socks5ProxyChain.length > 0
),
chainLength: host.socks5ProxyChain?.length || 0,
});
@@ -383,7 +420,8 @@ class SSHConnectionPool {
statsLogger.error("SOCKS5 connection error", socks5Error, {
operation: "socks5_connection_error",
hostIp: host.ip,
errorMessage: socks5Error instanceof Error ? socks5Error.message : "Unknown",
errorMessage:
socks5Error instanceof Error ? socks5Error.message : "Unknown",
});
reject(
new Error(
@@ -476,13 +514,30 @@ class SSHConnectionPool {
private cleanup(): void {
const now = Date.now();
const maxAge = 10 * 60 * 1000;
let totalCleaned = 0;
let totalUnhealthy = 0;
for (const [hostKey, connections] of this.connections.entries()) {
const activeConnections = connections.filter((conn) => {
// Remove if idle for too long
if (!conn.inUse && now - conn.lastUsed > maxAge) {
try {
conn.client.end();
} catch (error) {}
totalCleaned++;
return false;
}
// Also remove if connection is unhealthy (even if recently used)
if (!this.isConnectionHealthy(conn.client)) {
statsLogger.warn("Removing unhealthy connection during cleanup", {
operation: "cleanup_unhealthy",
hostKey,
inUse: conn.inUse,
});
try {
conn.client.end();
} catch (error) {}
totalUnhealthy++;
return false;
}
return true;
@@ -494,6 +549,15 @@ class SSHConnectionPool {
this.connections.set(hostKey, activeConnections);
}
}
if (totalCleaned > 0 || totalUnhealthy > 0) {
statsLogger.info("Connection pool cleanup completed", {
operation: "cleanup_complete",
idleCleaned: totalCleaned,
unhealthyCleaned: totalUnhealthy,
remainingHosts: this.connections.size,
});
}
}
clearAllConnections(): void {
@@ -507,10 +571,14 @@ class SSHConnectionPool {
try {
conn.client.end();
} catch (error) {
statsLogger.error("Error closing connection during full cleanup", error, {
operation: "clear_all_error",
hostKey,
});
statsLogger.error(
"Error closing connection during full cleanup",
error,
{
operation: "clear_all_error",
hostKey,
},
);
}
}
}
@@ -533,18 +601,35 @@ class SSHConnectionPool {
class RequestQueue {
private queues = new Map<number, Array<() => Promise<unknown>>>();
private processing = new Set<number>();
private requestTimeout = 60000; // 60 second timeout for requests
async queueRequest<T>(hostId: number, request: () => Promise<T>): Promise<T> {
return new Promise((resolve, reject) => {
const queue = this.queues.get(hostId) || [];
queue.push(async () => {
return new Promise<T>((resolve, reject) => {
const wrappedRequest = async () => {
try {
const result = await request();
// Add timeout wrapper to prevent indefinite hanging
const result = await Promise.race<T>([
request(),
new Promise<never>((_, rej) =>
setTimeout(
() =>
rej(
new Error(
`Request timeout after ${this.requestTimeout}ms for host ${hostId}`,
),
),
this.requestTimeout,
),
),
]);
resolve(result);
} catch (error) {
reject(error);
}
});
};
const queue = this.queues.get(hostId) || [];
queue.push(wrappedRequest);
this.queues.set(hostId, queue);
this.processQueue(hostId);
});
@@ -561,12 +646,21 @@ class RequestQueue {
if (request) {
try {
await request();
} catch (error) {}
} catch (error) {
// Log errors but continue processing queue
statsLogger.debug("Request queue error", {
operation: "queue_request_error",
hostId,
error: error instanceof Error ? error.message : String(error),
});
}
}
}
this.processing.delete(hostId);
if (queue.length > 0) {
// Check if new items were added during processing
const currentQueue = this.queues.get(hostId);
if (currentQueue && currentQueue.length > 0) {
this.processQueue(hostId);
}
}
@@ -701,10 +795,87 @@ class AuthFailureTracker {
}
}
class PollingBackoff {
  /**
   * Tracks per-host exponential backoff for metrics polling.
   *
   * Each failure doubles the retry delay (capped at `maxDelay`). After
   * `maxRetries` consecutive failures, polling for that host is suspended.
   * A stale suspended entry is eventually dropped by `cleanup()` so the
   * host gets a fresh start instead of staying suspended forever.
   */
  private failures = new Map<number, { count: number; nextRetry: number }>();
  private baseDelay = 30000; // 30s base delay
  private maxDelay = 600000; // 10 min max delay
  private maxRetries = 5; // Max retry attempts before suspending polling

  /** Record a polling failure and schedule the next retry with exponential backoff. */
  recordFailure(hostId: number): void {
    const existing = this.failures.get(hostId) || { count: 0, nextRetry: 0 };
    const delay = Math.min(
      this.baseDelay * Math.pow(2, existing.count),
      this.maxDelay,
    );
    this.failures.set(hostId, {
      count: existing.count + 1,
      nextRetry: Date.now() + delay,
    });
    statsLogger.debug("Recorded polling backoff", {
      operation: "polling_backoff_recorded",
      hostId,
      failureCount: existing.count + 1,
      nextRetryDelay: delay,
    });
  }

  /** True when polling for this host should be skipped (backoff window or suspension). */
  shouldSkip(hostId: number): boolean {
    const backoff = this.failures.get(hostId);
    if (!backoff) return false;
    // If exceeded max retries, always skip (until cleanup() expires the entry)
    if (backoff.count >= this.maxRetries) {
      return true;
    }
    // Otherwise check if we're still in backoff period
    return Date.now() < backoff.nextRetry;
  }

  /** Human-readable backoff status for log messages, or null when none applies. */
  getBackoffInfo(hostId: number): string | null {
    const backoff = this.failures.get(hostId);
    if (!backoff) return null;
    if (backoff.count >= this.maxRetries) {
      return `Max retries exceeded (${backoff.count} failures) - polling suspended`;
    }
    const remainingMs = backoff.nextRetry - Date.now();
    if (remainingMs > 0) {
      const remainingSec = Math.ceil(remainingMs / 1000);
      return `Retry in ${remainingSec}s (attempt ${backoff.count}/${this.maxRetries})`;
    }
    return null;
  }

  /** Clear backoff state for a host (called after a successful poll). */
  reset(hostId: number): void {
    this.failures.delete(hostId);
    statsLogger.debug("Reset polling backoff", {
      operation: "polling_backoff_reset",
      hostId,
    });
  }

  /**
   * Drop stale entries.
   *
   * BUGFIX: entries at `maxRetries` were previously excluded from cleanup,
   * which made suspension permanent — `shouldSkip()` returned true forever,
   * so the host was never polled again and `reset()` (which only runs on a
   * successful poll) could never fire. Now ANY entry whose `nextRetry` is
   * older than an hour is removed, giving suspended hosts another chance.
   */
  cleanup(): void {
    const maxAge = 60 * 60 * 1000; // 1 hour
    const now = Date.now();
    for (const [hostId, backoff] of this.failures.entries()) {
      if (now - backoff.nextRetry > maxAge) {
        this.failures.delete(hostId);
      }
    }
  }
}
const connectionPool = new SSHConnectionPool();
const requestQueue = new RequestQueue();
const metricsCache = new MetricsCache();
const authFailureTracker = new AuthFailureTracker();
const pollingBackoff = new PollingBackoff();
const authManager = AuthManager.getInstance();
type HostStatus = "online" | "offline";
@@ -891,7 +1062,11 @@ class PollingManager {
}
try {
const isOnline = await tcpPing(refreshedHost.ip, refreshedHost.port, 5000);
const isOnline = await tcpPing(
refreshedHost.ip,
refreshedHost.port,
5000,
);
const statusEntry: StatusEntry = {
status: isOnline ? "online" : "offline",
lastChecked: new Date().toISOString(),
@@ -907,6 +1082,17 @@ class PollingManager {
}
private async pollHostMetrics(host: SSHHostWithCredentials): Promise<void> {
// Check if we should skip due to backoff
if (pollingBackoff.shouldSkip(host.id)) {
const backoffInfo = pollingBackoff.getBackoffInfo(host.id);
statsLogger.debug("Skipping metrics polling due to backoff", {
operation: "poll_metrics_skipped",
hostId: host.id,
backoffInfo,
});
return;
}
// Refresh host data from database to get latest SOCKS5 and other settings
const refreshedHost = await fetchHostById(host.id, host.userId);
if (!refreshedHost) {
@@ -928,17 +1114,24 @@ class PollingManager {
data: metrics,
timestamp: Date.now(),
});
// Reset backoff on successful collection
pollingBackoff.reset(refreshedHost.id);
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
// Record failure for backoff
pollingBackoff.recordFailure(refreshedHost.id);
const latestConfig = this.pollingConfigs.get(refreshedHost.id);
if (latestConfig && latestConfig.statsConfig.metricsEnabled) {
const backoffInfo = pollingBackoff.getBackoffInfo(refreshedHost.id);
statsLogger.warn("Failed to collect metrics for host", {
operation: "metrics_poll_failed",
hostId: refreshedHost.id,
hostName: refreshedHost.name,
error: errorMessage,
backoff: backoffInfo,
});
}
}
@@ -1228,8 +1421,13 @@ async function resolveHostCredentials(
useSocks5: baseHost.useSocks5,
socks5Host: baseHost.socks5Host,
socks5Port: baseHost.socks5Port,
hasSocks5ProxyChain: !!(baseHost.socks5ProxyChain && (baseHost.socks5ProxyChain as any[]).length > 0),
proxyChainLength: baseHost.socks5ProxyChain ? (baseHost.socks5ProxyChain as any[]).length : 0,
hasSocks5ProxyChain: !!(
baseHost.socks5ProxyChain &&
(baseHost.socks5ProxyChain as any[]).length > 0
),
proxyChainLength: baseHost.socks5ProxyChain
? (baseHost.socks5ProxyChain as any[]).length
: 0,
});
return baseHost as unknown as SSHHostWithCredentials;
@@ -1735,6 +1933,7 @@ app.listen(PORT, async () => {
setInterval(
() => {
authFailureTracker.cleanup();
pollingBackoff.cleanup();
},
10 * 60 * 1000,
);

View File

@@ -3,28 +3,87 @@ import type { Client } from "ssh2";
export function execCommand(
client: Client,
command: string,
timeoutMs = 30000,
): Promise<{
stdout: string;
stderr: string;
code: number | null;
}> {
return new Promise((resolve, reject) => {
client.exec(command, { pty: false }, (err, stream) => {
if (err) return reject(err);
let settled = false;
let stream: any = null;
const timeout = setTimeout(() => {
if (!settled) {
settled = true;
cleanup();
reject(new Error(`Command timeout after ${timeoutMs}ms: ${command}`));
}
}, timeoutMs);
const cleanup = () => {
clearTimeout(timeout);
if (stream) {
try {
stream.removeAllListeners();
if (stream.stderr) {
stream.stderr.removeAllListeners();
}
stream.destroy();
} catch (error) {
// Ignore cleanup errors
}
}
};
client.exec(command, { pty: false }, (err, _stream) => {
if (err) {
if (!settled) {
settled = true;
cleanup();
reject(err);
}
return;
}
stream = _stream;
let stdout = "";
let stderr = "";
let exitCode: number | null = null;
stream
.on("close", (code: number | undefined) => {
exitCode = typeof code === "number" ? code : null;
resolve({ stdout, stderr, code: exitCode });
if (!settled) {
settled = true;
exitCode = typeof code === "number" ? code : null;
cleanup();
resolve({ stdout, stderr, code: exitCode });
}
})
.on("data", (data: Buffer) => {
stdout += data.toString("utf8");
})
.stderr.on("data", (data: Buffer) => {
stderr += data.toString("utf8");
.on("error", (streamErr: Error) => {
if (!settled) {
settled = true;
cleanup();
reject(streamErr);
}
});
if (stream.stderr) {
stream.stderr
.on("data", (data: Buffer) => {
stderr += data.toString("utf8");
})
.on("error", (stderrErr: Error) => {
if (!settled) {
settled = true;
cleanup();
reject(stderrErr);
}
});
}
});
});
}

View File

@@ -26,12 +26,21 @@ export async function collectCpuMetrics(client: Client): Promise<{
let loadTriplet: [number, number, number] | null = null;
try {
const [stat1, loadAvgOut, coresOut] = await Promise.all([
execCommand(client, "cat /proc/stat"),
execCommand(client, "cat /proc/loadavg"),
execCommand(
client,
"nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo",
// Wrap Promise.all with timeout to prevent indefinite blocking
const [stat1, loadAvgOut, coresOut] = await Promise.race([
Promise.all([
execCommand(client, "cat /proc/stat"),
execCommand(client, "cat /proc/loadavg"),
execCommand(
client,
"nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo",
),
]),
new Promise<never>((_, reject) =>
setTimeout(
() => reject(new Error("CPU metrics collection timeout")),
25000,
),
),
]);

View File

@@ -46,10 +46,20 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
const timeStr = parts.slice(timeStart, timeStart + 5).join(" ");
if (user && user !== "wtmp" && tty !== "system") {
let parsedTime: string;
try {
const date = new Date(timeStr);
parsedTime = isNaN(date.getTime())
? new Date().toISOString()
: date.toISOString();
} catch (e) {
parsedTime = new Date().toISOString();
}
recentLogins.push({
user,
ip,
time: new Date(timeStr).toISOString(),
time: parsedTime,
status: "success",
});
if (ip !== "local") {
@@ -60,7 +70,10 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
}
}
} catch (e) {
// Ignore errors
statsLogger.debug("Failed to collect recent login stats", {
operation: "recent_login_stats_failed",
error: e instanceof Error ? e.message : String(e),
});
}
try {
@@ -96,12 +109,20 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
}
if (user && ip) {
let parsedTime: string;
try {
const date = timeStr ? new Date(timeStr) : new Date();
parsedTime = isNaN(date.getTime())
? new Date().toISOString()
: date.toISOString();
} catch (e) {
parsedTime = new Date().toISOString();
}
failedLogins.push({
user,
ip,
time: timeStr
? new Date(timeStr).toISOString()
: new Date().toISOString(),
time: parsedTime,
status: "failed",
});
if (ip !== "unknown") {
@@ -110,7 +131,10 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
}
}
} catch (e) {
// Ignore errors
statsLogger.debug("Failed to collect failed login stats", {
operation: "failed_login_stats_failed",
error: e instanceof Error ? e.message : String(e),
});
}
return {

View File

@@ -8,8 +8,8 @@ export async function collectProcessesMetrics(client: Client): Promise<{
top: Array<{
pid: string;
user: string;
cpu: string;
mem: string;
cpu: number;
mem: number;
command: string;
}>;
}> {
@@ -18,8 +18,8 @@ export async function collectProcessesMetrics(client: Client): Promise<{
const topProcesses: Array<{
pid: string;
user: string;
cpu: string;
mem: string;
cpu: number;
mem: number;
command: string;
}> = [];
@@ -33,11 +33,13 @@ export async function collectProcessesMetrics(client: Client): Promise<{
for (let i = 1; i < Math.min(psLines.length, 11); i++) {
const parts = psLines[i].split(/\s+/);
if (parts.length >= 11) {
const cpuVal = Number(parts[2]);
const memVal = Number(parts[3]);
topProcesses.push({
pid: parts[1],
user: parts[0],
cpu: parts[2],
mem: parts[3],
cpu: Number.isFinite(cpuVal) ? cpuVal : 0,
mem: Number.isFinite(memVal) ? memVal : 0,
command: parts.slice(10).join(" ").substring(0, 50),
});
}
@@ -46,8 +48,12 @@ export async function collectProcessesMetrics(client: Client): Promise<{
const procCount = await execCommand(client, "ps aux | wc -l");
const runningCount = await execCommand(client, "ps aux | grep -c ' R '");
totalProcesses = Number(procCount.stdout.trim()) - 1;
runningProcesses = Number(runningCount.stdout.trim());
const totalCount = Number(procCount.stdout.trim()) - 1;
totalProcesses = Number.isFinite(totalCount) ? totalCount : null;
const runningCount2 = Number(runningCount.stdout.trim());
runningProcesses = Number.isFinite(runningCount2) ? runningCount2 : null;
} catch (e) {
statsLogger.debug("Failed to collect process stats", {
operation: "process_stats_failed",

View File

@@ -30,35 +30,32 @@ interface PermissionCheckResult {
class PermissionManager {
private static instance: PermissionManager;
private permissionCache: Map<string, { permissions: string[]; timestamp: number }>;
private permissionCache: Map<
string,
{ permissions: string[]; timestamp: number }
>;
private readonly CACHE_TTL = 5 * 60 * 1000; // 5 minutes
private constructor() {
this.permissionCache = new Map();
// Auto-cleanup expired host access every 1 minute
setInterval(
() => {
this.cleanupExpiredAccess().catch((error) => {
databaseLogger.error(
"Failed to run periodic host access cleanup",
error,
{
operation: "host_access_cleanup_periodic",
},
);
});
},
60 * 1000,
);
setInterval(() => {
this.cleanupExpiredAccess().catch((error) => {
databaseLogger.error(
"Failed to run periodic host access cleanup",
error,
{
operation: "host_access_cleanup_periodic",
},
);
});
}, 60 * 1000);
// Clear permission cache every 5 minutes
setInterval(
() => {
this.clearPermissionCache();
},
this.CACHE_TTL,
);
setInterval(() => {
this.clearPermissionCache();
}, this.CACHE_TTL);
}
static getInstance(): PermissionManager {
@@ -168,10 +165,7 @@ class PermissionManager {
* Check if user has a specific permission
* Supports wildcards: "hosts.*", "*"
*/
async hasPermission(
userId: string,
permission: string,
): Promise<boolean> {
async hasPermission(userId: string, permission: string): Promise<boolean> {
const userPermissions = await this.getUserPermissions(userId);
// Check for wildcard "*" (god mode)
@@ -220,7 +214,14 @@ class PermissionManager {
};
}
// Check if host is shared with user
// Get user's role IDs
const userRoleIds = await db
.select({ roleId: userRoles.roleId })
.from(userRoles)
.where(eq(userRoles.userId, userId));
const roleIds = userRoleIds.map((r) => r.roleId);
// Check if host is shared with user OR user's roles
const now = new Date().toISOString();
const sharedAccess = await db
.select()
@@ -228,11 +229,16 @@ class PermissionManager {
.where(
and(
eq(hostAccess.hostId, hostId),
eq(hostAccess.userId, userId),
or(
isNull(hostAccess.expiresAt),
gte(hostAccess.expiresAt, now),
eq(hostAccess.userId, userId),
roleIds.length > 0
? sql`${hostAccess.roleId} IN (${sql.join(
roleIds.map((id) => sql`${id}`),
sql`, `,
)})`
: sql`false`,
),
or(isNull(hostAccess.expiresAt), gte(hostAccess.expiresAt, now)),
),
)
.limit(1);
@@ -243,7 +249,7 @@ class PermissionManager {
// Check permission level for write/delete actions
if (action === "write" || action === "delete") {
const level = access.permissionLevel;
if (level === "readonly") {
if (level === "view" || level === "readonly") {
return {
hasAccess: false,
isOwner: false,

View File

@@ -301,6 +301,19 @@ export function Dashboard({
});
};
const handleServerStatClick = (serverId: number, serverName: string) => {
  // Open a server tab for the clicked dashboard stat card.
  // `void` marks this as intentional fire-and-forget; the .catch keeps a
  // failed host fetch from surfacing as an unhandled promise rejection.
  void getSSHHosts()
    .then((hosts) => {
      const host = hosts.find((h: { id: number }) => h.id === serverId);
      // The host may have been deleted since the dashboard loaded.
      if (!host) return;
      addTab({
        type: "server",
        title: serverName,
        hostConfig: host,
      });
    })
    .catch((error) => {
      console.error("Failed to open server tab", error);
    });
};
const handleAddHost = () => {
const sshManagerTab = tabList.find((t) => t.type === "ssh_manager");
if (sshManagerTab) {
@@ -714,6 +727,9 @@ export function Dashboard({
key={server.id}
variant="outline"
className="border-2 !border-dark-border bg-dark-bg h-auto p-3 min-w-0"
onClick={() =>
handleServerStatClick(server.id, server.name)
}
>
<div className="flex flex-col w-full">
<div className="flex flex-row items-center mb-2">

View File

@@ -285,13 +285,13 @@ export function HostSharingTab({
return (
<div className="space-y-6">
{/* Credential Authentication Warning */}
{hostData?.authType === "Credential" && (
{/* Credential Requirement Warning */}
{!hostData?.credentialId && (
<Alert variant="destructive">
<AlertCircle className="h-4 w-4" />
<AlertTitle>{t("rbac.credentialSharingWarning")}</AlertTitle>
<AlertTitle>{t("rbac.credentialRequired")}</AlertTitle>
<AlertDescription>
{t("rbac.credentialSharingWarningDescription")}
{t("rbac.credentialRequiredDescription")}
</AlertDescription>
</Alert>
)}
@@ -464,7 +464,12 @@ export function HostSharingTab({
/>
</div>
<Button type="button" onClick={handleShare} className="w-full">
<Button
type="button"
onClick={handleShare}
className="w-full"
disabled={!hostData?.credentialId}
>
<Plus className="h-4 w-4 mr-2" />
{t("rbac.share")}
</Button>

View File

@@ -236,13 +236,21 @@ export function ServerStats({
};
fetchStatus();
intervalId = window.setInterval(fetchStatus, 10000);
intervalId = window.setInterval(
fetchStatus,
statsConfig.statusCheckInterval * 1000,
);
return () => {
cancelled = true;
if (intervalId) window.clearInterval(intervalId);
};
}, [currentHostConfig?.id, isVisible, statusCheckEnabled]);
}, [
currentHostConfig?.id,
isVisible,
statusCheckEnabled,
statsConfig.statusCheckInterval,
]);
React.useEffect(() => {
if (!metricsEnabled || !currentHostConfig?.id || !isVisible) {
@@ -297,13 +305,21 @@ export function ServerStats({
};
fetchMetrics();
intervalId = window.setInterval(fetchMetrics, 10000);
intervalId = window.setInterval(
fetchMetrics,
statsConfig.metricsInterval * 1000,
);
return () => {
cancelled = true;
if (intervalId) window.clearInterval(intervalId);
};
}, [currentHostConfig?.id, isVisible, metricsEnabled]);
}, [
currentHostConfig?.id,
isVisible,
metricsEnabled,
statsConfig.metricsInterval,
]);
const topMarginPx = isTopbarOpen ? 74 : 16;
const leftMarginPx = sidebarState === "collapsed" ? 16 : 8;

View File

@@ -77,9 +77,9 @@ export function LoginStatsWidget({ metrics }: LoginStatsWidgetProps) {
</div>
) : (
<div className="space-y-1">
{recentLogins.slice(0, 5).map((login, idx) => (
{recentLogins.slice(0, 5).map((login) => (
<div
key={idx}
key={`${login.user}-${login.time}-${login.ip}`}
className="text-xs bg-dark-bg-darker p-2 rounded border border-dark-border/30 flex justify-between items-center"
>
<div className="flex items-center gap-2 min-w-0">
@@ -111,9 +111,9 @@ export function LoginStatsWidget({ metrics }: LoginStatsWidgetProps) {
</span>
</div>
<div className="space-y-1">
{failedLogins.slice(0, 3).map((login, idx) => (
{failedLogins.slice(0, 3).map((login) => (
<div
key={idx}
key={`failed-${login.user}-${login.time}-${login.ip || "unknown"}`}
className="text-xs bg-red-900/20 p-2 rounded border border-red-500/30 flex justify-between items-center"
>
<div className="flex items-center gap-2 min-w-0">

View File

@@ -59,9 +59,9 @@ export function ProcessesWidget({ metrics }: ProcessesWidgetProps) {
</div>
) : (
<div className="space-y-2">
{topProcesses.map((proc, index: number) => (
{topProcesses.map((proc) => (
<div
key={index}
key={proc.pid}
className="p-2.5 rounded-lg bg-dark-bg/30 hover:bg-dark-bg/50 transition-colors border border-dark-border/20"
>
<div className="flex items-center justify-between mb-1.5">

View File

@@ -1966,6 +1966,7 @@ export async function getServerStatusById(id: number): Promise<ServerStatus> {
return response.data;
} catch (error) {
handleApiError(error, "fetch server status");
throw error; // Explicit throw to propagate error
}
}
@@ -1975,6 +1976,7 @@ export async function getServerMetricsById(id: number): Promise<ServerMetrics> {
return response.data;
} catch (error) {
handleApiError(error, "fetch server metrics");
throw error; // Explicit throw to propagate error
}
}