* fix select edit host but not update view (#438)

* fix: Checksum issue with chocolatey

* fix: Remove homebrew old stuff

* Add Korean translation (#439)

Co-authored-by: 송준우 <2484@coreit.co.kr>

* feat: Automate flatpak

* fix: Add imagemagick to electron builder to resolve build error

* fix: Build error with runtime repo flag

* fix: Flatpak runtime error and install freedesktop ver warning

* fix: Flatpak runtime error and install freedesktop ver warning

* feat: Re-add homebrew cask and move scripts to backend

* fix: No sandbox flag issue

* fix: Change name for electron macos cask output

* fix: Sandbox error with Linux

* fix: Remove coming soon for app stores in readme

* Adding Comment at the end of the public_key on the host on deploy (#440)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* -Add New Interface for Credential DB
-Add Credential Name as a comment into the server authorized_key file

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>

* Sudo auto fill password (#441)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* Feature Sudo password auto-fill;

* Fix locale json schema;

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>

* Added Italian Language; (#445)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* Added Italian Language;

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>

* Auto collapse snippet folders (#448)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* feat: Add collapsable snippets (customizable in user profile)

* Translations (#447)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* Added Italian Language;

* Fix translations;

Removed duplicate keys, synchronised other languages using English as the source, translated added keys, fixed inaccurate translations.

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>

* Remove PTY-level keepalive (#449)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* Remove PTY-level keepalive to prevent unwanted terminal output; use SSH-level keepalive instead

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>

* feat: Separate server stats and tunnel management (improved both UIs) then started initial docker implementation

* fix: finalize adding docker to db

* feat: Add docker management support (local squash)

* Fix RBAC role system bugs and improve UX (#446)

* Fix RBAC role system bugs and improve UX

- Fix user list dropdown selection in host sharing
- Fix role sharing permissions to include role-based access
- Fix translation template interpolation for success messages
- Standardize system roles to admin and user only
- Auto-assign user role to new registrations
- Remove blocking confirmation dialogs in modal contexts
- Add missing i18n keys for common actions
- Fix button type to prevent unintended form submissions

* Enhance RBAC system with UI improvements and security fixes

- Move role assignment to Users tab with per-user role management
- Protect system roles (admin/user) from editing and manual assignment
- Simplify permission system: remove Use level, keep View and Manage
- Hide Update button and Sharing tab for view-only/shared hosts
- Prevent users from sharing hosts with themselves
- Unify table and modal styling across admin panels
- Auto-assign system roles on user registration
- Add permission metadata to host interface

* Add empty state message for role assignment

- Display helpful message when no custom roles available
- Clarify that system roles are auto-assigned
- Add noCustomRolesToAssign translation in English and Chinese

* fix: Prevent credential sharing errors for shared hosts

- Skip credential resolution for shared hosts with credential authentication
  to prevent decryption errors (credentials are encrypted per-user)
- Add warning alert in sharing tab when host uses credential authentication
- Inform users that shared users cannot connect to credential-based hosts
- Add translations for credential sharing warning (EN/ZH)

This prevents authentication failures when sharing hosts configured
with credential authentication while maintaining security by keeping
credentials isolated per user.

* feat: Improve rbac UI and fixes some bugs

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>
Co-authored-by: LukeGus <bugattiguy527@gmail.com>

* SOCKS5 support (#452)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* SOCKS5 support

Adding single and chain socks5 proxy support

* fix: cleanup files

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>
Co-authored-by: LukeGus <bugattiguy527@gmail.com>

* Notes and Expiry fields add (#453)

* Add termix.rb Cask file

* Update Termix to version 1.9.0 with new checksum

* Update README to remove 'coming soon' notes

* Notes and Expiry add

* fix: cleanup files

---------

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>
Co-authored-by: LukeGus <bugattiguy527@gmail.com>

* fix: ssh host types

* fix: sudo incorrect styling and remove expiration date

* feat: add sudo password and add diagonal bg's

* fix: snippet running on enter key

* fix: base64 decoding

* fix: improve server stats / rbac

* fix: wrap ssh host json export in hosts array

* feat: auto trim host inputs, fix file manager jump hosts, dashboard prevent duplicates, file manager terminal not size updating, improve left sidebar sorting, hide/show tags, add appearance user profile tab, add new host manager tabs.

* feat: improve terminal connection speed

* fix: sqlite constraint errors and support non-root user (nginx perm issue)

* feat: add beta syntax highlighting to terminal

* feat: update imports and improve admin settings user management

* chore: update translations

* chore: update translations

* feat: Complete light mode implementation with semantic theme system (#450)

- Add comprehensive light/dark mode CSS variables with semantic naming
- Implement theme-aware scrollbars using CSS variables
- Add light mode backgrounds: --bg-base, --bg-elevated, --bg-surface, etc.
- Add theme-aware borders: --border-base, --border-panel, --border-subtle
- Add semantic text colors: --foreground-secondary, --foreground-subtle
- Convert oklch colors to hex for better compatibility
- Add theme awareness to CodeMirror editors
- Update dark mode colors for consistency (background, sidebar, card, muted, input)
- Add Tailwind color mappings for semantic classes

Co-authored-by: Luke Gustafson <88517757+LukeGus@users.noreply.github.com>

* fix: syntax errors

* chore: updating/match themes and split admin settings

* feat: add translation workflow and remove old translation.json

* fix: translation workflow error

* fix: translation workflow error

* feat: improve translation system and update workflow

* fix: wrong path for translations

* fix: change translation to flat files

* fix: gh rule error

* chore: auto-translate to multiple languages (#458)

* chore: improve organization and made a few styling changes in host manager

* feat: improve terminal stability and split out the host manager

* fix: add unversioned files

* chore: migrate all to use the new theme system

* fix: wrong animation line colors

* fix: rbac implementation general issues (local squash)

* fix: remove unneeded files

* feat: add 10 new langs

* chore: update gitignore

* chore: auto-translate to multiple languages (#459)

* fix: improve tunnel system

* fix: properly split tabs, still need to fix up the host manager

* chore: cleanup files (possible RC)

* feat: add norwegian

* chore: auto-translate to multiple languages (#461)

* fix: small qol fixes and began readme update

* fix: run cleanup script

* feat: add docker docs button

* feat: general bug fixes and readme updates

* fix: translations

* chore: auto-translate to multiple languages (#462)

* fix: cleanup files

* fix: test new translation issue and add better server-stats support

* fix: fix translate error

* chore: auto-translate to multiple languages (#463)

* fix: fix translate mismatching text

* chore: auto-translate to multiple languages (#465)

* fix: fix translate mismatching text

* fix: fix translate mismatching text

* chore: auto-translate to multiple languages (#466)

* fix: fix translate mismatching text

* fix: fix translate mismatching text

* fix: fix translate mismatching text

* chore: auto-translate to multiple languages (#467)

* fix: fix translate mismatching text

* chore: auto-translate to multiple languages (#468)

* feat: add to readme, a few qol changes, and improve server stats in general

* chore: auto-translate to multiple languages (#469)

* feat: turned disk usage into graph and fixed issue with terminal console

* fix: electron build error and hide icons when shared

* chore: run clean

* fix: general server stats issues, file manager decoding, ui qol

* fix: add dashboard line breaks

* fix: docker console error

* fix: docker console not loading and mismatched stripped background for electron

* fix: docker console not loading

* chore: docker console not loading in docker

* chore: translate readme to chinese

* chore: match package lock to package json

* chore: nginx config issue for docker console

* chore: auto-translate to multiple languages (#470)

---------

Co-authored-by: Tran Trung Kien <kientt13.7@gmail.com>
Co-authored-by: junu <bigdwarf_@naver.com>
Co-authored-by: 송준우 <2484@coreit.co.kr>
Co-authored-by: SlimGary <trash.slim@gmail.com>
Co-authored-by: Nunzio Marfè <nunzio.marfe@protonmail.com>
Co-authored-by: Wesley Reid <starhound@lostsouls.org>
Co-authored-by: ZacharyZcR <zacharyzcr1984@gmail.com>
Co-authored-by: Denis <38875137+Medvedinca@users.noreply.github.com>
Co-authored-by: Peet McKinney <68706879+PeetMcK@users.noreply.github.com>
This commit was merged in pull request #471.
This commit is contained in:
Luke Gustafson
2025-12-31 22:20:12 -06:00
committed by GitHub
parent 7139290d14
commit ad86c2040b
225 changed files with 87356 additions and 17706 deletions

View File

@@ -0,0 +1,632 @@
import { Client as SSHClient } from "ssh2";
import { WebSocketServer, WebSocket } from "ws";
import { parse as parseUrl } from "url";
import { AuthManager } from "../utils/auth-manager.js";
import { sshData, sshCredentials } from "../database/db/schema.js";
import { and, eq } from "drizzle-orm";
import { getDb } from "../database/db/index.js";
import { SimpleDBOps } from "../utils/simple-db-ops.js";
import { systemLogger } from "../utils/logger.js";
import type { SSHHost } from "../../types/index.js";
// All console events funnel through the shared system logger.
const dockerConsoleLogger = systemLogger;

// State for one live docker-console session (one WebSocket connection).
interface SSHSession {
  client: SSHClient; // SSH connection to the docker host
  stream: any; // channel of the running `docker exec`; null until started
  isConnected: boolean;
  containerId?: string; // target container for this session
  shell?: string; // shell actually used inside the container
}

// Live sessions keyed by the per-connection sessionId generated on connect.
const activeSessions = new Map<string, SSHSession>();
// WebSocket endpoint for the docker console.
//
// Bug fix: the previous verifyClient was an `async` single-argument function.
// ws uses the return value of a one-argument verifyClient synchronously, and
// an async function always returns a (truthy) Promise — so EVERY connection
// was accepted, token or not. The two-argument callback form below performs
// real async verification. It also attaches the authenticated userId to the
// upgrade request, which the connection handler reads via (req as any).userId
// (previously never set, so credential lookups ran with userId undefined).
const wss = new WebSocketServer({
  host: "0.0.0.0",
  port: 30008,
  verifyClient: (info, callback) => {
    (async () => {
      const url = parseUrl(info.req.url || "", true);
      const token = url.query.token as string;
      if (!token) {
        return callback(false, 401, "Unauthorized");
      }
      const authManager = AuthManager.getInstance();
      const decoded = await authManager.verifyJWTToken(token);
      if (!decoded || !decoded.userId) {
        return callback(false, 401, "Unauthorized");
      }
      // Expose the verified user to the "connection" handler.
      (info.req as any).userId = decoded.userId;
      callback(true);
    })().catch(() => {
      // Treat any verification failure (bad token, auth backend error)
      // as a rejected connection, matching the original catch behavior.
      callback(false, 401, "Unauthorized");
    });
  },
});
/**
 * Probe the container for a usable shell, preferring bash, then sh, then ash.
 * Each candidate is checked by running `which <shell>` inside the container
 * over the session's SSH connection. Falls back to "sh" when no probe
 * succeeds.
 *
 * NOTE(review): containerId is interpolated into a host shell command —
 * confirm it is validated upstream before reaching this function.
 */
async function detectShell(
  session: SSHSession,
  containerId: string,
): Promise<string> {
  const candidates = ["bash", "sh", "ash"];

  // Resolves true when the candidate shell exists in the container: exec
  // errors, non-zero exit codes, and empty output all count as "missing".
  const probe = (candidate: string): Promise<boolean> =>
    new Promise((resolve) => {
      session.client.exec(
        `docker exec ${containerId} which ${candidate}`,
        (err, stream) => {
          if (err) return resolve(false);
          let stdout = "";
          stream.on("data", (chunk: Buffer) => {
            stdout += chunk.toString();
          });
          stream.stderr.on("data", () => {
            // which(1) noise is irrelevant; only the exit code matters.
          });
          stream.on("close", (code: number) => {
            resolve(code === 0 && stdout.trim().length > 0);
          });
        },
      );
    });

  for (const candidate of candidates) {
    if (await probe(candidate)) {
      return candidate;
    }
  }
  return "sh";
}
/**
 * Connect through an ordered chain of SSH jump hosts.
 *
 * For each hop: load the host row from the database (scoped to userId),
 * resolve its credentials (inline fields, overridden by a linked credential
 * record when present), then connect — tunnelling through the previous hop
 * via forwardOut when one exists.
 *
 * @param jumpHosts ordered list of `{ hostId }` entries; empty/absent → null
 * @param userId    owner whose host and credential rows are consulted
 * @returns the client connected to the LAST jump host (caller uses
 *          forwardOut on it to reach the final destination), or null
 * @throws when a referenced jump host row cannot be found
 *
 * NOTE(review): intermediate clients in the chain are never explicitly
 * closed on mid-chain failure or teardown — confirm this does not leak
 * connections.
 */
async function createJumpHostChain(
  jumpHosts: any[],
  userId: string,
): Promise<SSHClient | null> {
  if (!jumpHosts || jumpHosts.length === 0) {
    return null;
  }
  let currentClient: SSHClient | null = null;
  for (let i = 0; i < jumpHosts.length; i++) {
    const jumpHostId = jumpHosts[i].hostId;
    // Look up the jump host row, restricted to the requesting user.
    const jumpHostData = await SimpleDBOps.select(
      getDb()
        .select()
        .from(sshData)
        .where(and(eq(sshData.id, jumpHostId), eq(sshData.userId, userId))),
      "ssh_data",
      userId,
    );
    if (jumpHostData.length === 0) {
      throw new Error(`Jump host ${jumpHostId} not found`);
    }
    const jumpHost = jumpHostData[0] as unknown as SSHHost;
    // jumpHosts may be stored as a JSON string; normalize to an array
    // (bad JSON degrades to an empty list rather than failing the hop).
    if (typeof jumpHost.jumpHosts === "string" && jumpHost.jumpHosts) {
      try {
        jumpHost.jumpHosts = JSON.parse(jumpHost.jumpHosts);
      } catch (e) {
        dockerConsoleLogger.error("Failed to parse jump hosts", e, {
          hostId: jumpHost.id,
        });
        jumpHost.jumpHosts = [];
      }
    }
    // Start with credentials stored inline on the host record...
    let resolvedCredentials: any = {
      password: jumpHost.password,
      sshKey: jumpHost.key,
      keyPassword: jumpHost.keyPassword,
      authType: jumpHost.authType,
    };
    // ...and override with a linked credential record when configured.
    if (jumpHost.credentialId) {
      const credentials = await SimpleDBOps.select(
        getDb()
          .select()
          .from(sshCredentials)
          .where(
            and(
              eq(sshCredentials.id, jumpHost.credentialId as number),
              eq(sshCredentials.userId, userId),
            ),
          ),
        "ssh_credentials",
        userId,
      );
      if (credentials.length > 0) {
        const credential = credentials[0];
        // Field names differ across schema versions; accept any spelling.
        resolvedCredentials = {
          password: credential.password,
          sshKey:
            credential.private_key || credential.privateKey || credential.key,
          keyPassword: credential.key_password || credential.keyPassword,
          authType: credential.auth_type || credential.authType,
        };
      }
    }
    const client = new SSHClient();
    const config: any = {
      host: jumpHost.ip,
      port: jumpHost.port || 22,
      username: jumpHost.username,
      tryKeyboard: true,
      readyTimeout: 60000,
      keepaliveInterval: 30000,
      keepaliveCountMax: 120,
      tcpKeepAlive: true,
      tcpKeepAliveInitialDelay: 30000,
    };
    if (
      resolvedCredentials.authType === "password" &&
      resolvedCredentials.password
    ) {
      config.password = resolvedCredentials.password;
    } else if (
      resolvedCredentials.authType === "key" &&
      resolvedCredentials.sshKey
    ) {
      // Normalize line endings so ssh2 can parse keys pasted from
      // Windows clients.
      const cleanKey = resolvedCredentials.sshKey
        .trim()
        .replace(/\r\n/g, "\n")
        .replace(/\r/g, "\n");
      config.privateKey = Buffer.from(cleanKey, "utf8");
      if (resolvedCredentials.keyPassword) {
        config.passphrase = resolvedCredentials.keyPassword;
      }
    }
    // Not the first hop: open a forwarded channel through the previous
    // client and hand it to ssh2 as the transport socket.
    if (currentClient) {
      await new Promise<void>((resolve, reject) => {
        currentClient!.forwardOut(
          "127.0.0.1",
          0,
          jumpHost.ip,
          jumpHost.port || 22,
          (err, stream) => {
            if (err) return reject(err);
            config.sock = stream;
            resolve();
          },
        );
      });
    }
    await new Promise<void>((resolve, reject) => {
      client.on("ready", () => resolve());
      client.on("error", reject);
      client.connect(config);
    });
    currentClient = client;
  }
  return currentClient;
}
// One WebSocket connection == one docker console session. All client
// requests arrive as JSON messages with a `type` discriminator.
wss.on("connection", async (ws: WebSocket, req) => {
  // userId is expected to be attached to the upgrade request during
  // verification — confirm verifyClient actually sets it.
  const userId = (req as any).userId;
  const sessionId = `docker-console-${Date.now()}-${Math.random()}`;
  let sshSession: SSHSession | null = null;
  ws.on("message", async (data) => {
    try {
      const message = JSON.parse(data.toString());
      switch (message.type) {
        // "connect": open SSH to the docker host (optionally via jump
        // hosts), validate or detect the shell, then start an interactive
        // `docker exec` with a PTY.
        case "connect": {
          const { hostConfig, containerId, shell, cols, rows } =
            message.data as {
              hostConfig: SSHHost;
              containerId: string;
              shell?: string;
              cols?: number;
              rows?: number;
            };
          // jumpHosts may arrive JSON-encoded; normalize to an array
          // (bad JSON degrades to an empty list).
          if (
            typeof hostConfig.jumpHosts === "string" &&
            hostConfig.jumpHosts
          ) {
            try {
              hostConfig.jumpHosts = JSON.parse(hostConfig.jumpHosts);
            } catch (e) {
              dockerConsoleLogger.error("Failed to parse jump hosts", e, {
                hostId: hostConfig.id,
              });
              hostConfig.jumpHosts = [];
            }
          }
          if (!hostConfig || !containerId) {
            ws.send(
              JSON.stringify({
                type: "error",
                message: "Host configuration and container ID are required",
              }),
            );
            return;
          }
          // Docker access is opt-in per host.
          if (!hostConfig.enableDocker) {
            ws.send(
              JSON.stringify({
                type: "error",
                message:
                  "Docker is not enabled for this host. Enable it in Host Settings.",
              }),
            );
            return;
          }
          try {
            // Inline credentials from the host record, overridden below by
            // a linked credential record when one is configured.
            let resolvedCredentials: any = {
              password: hostConfig.password,
              sshKey: hostConfig.key,
              keyPassword: hostConfig.keyPassword,
              authType: hostConfig.authType,
            };
            if (hostConfig.credentialId) {
              const credentials = await SimpleDBOps.select(
                getDb()
                  .select()
                  .from(sshCredentials)
                  .where(
                    and(
                      eq(sshCredentials.id, hostConfig.credentialId as number),
                      eq(sshCredentials.userId, userId),
                    ),
                  ),
                "ssh_credentials",
                userId,
              );
              if (credentials.length > 0) {
                const credential = credentials[0];
                // Field names differ across schema versions; accept any.
                resolvedCredentials = {
                  password: credential.password,
                  sshKey:
                    credential.private_key ||
                    credential.privateKey ||
                    credential.key,
                  keyPassword:
                    credential.key_password || credential.keyPassword,
                  authType: credential.auth_type || credential.authType,
                };
              }
            }
            const client = new SSHClient();
            const config: any = {
              host: hostConfig.ip,
              port: hostConfig.port || 22,
              username: hostConfig.username,
              tryKeyboard: true,
              readyTimeout: 60000,
              keepaliveInterval: 30000,
              keepaliveCountMax: 120,
              tcpKeepAlive: true,
              tcpKeepAliveInitialDelay: 30000,
            };
            if (
              resolvedCredentials.authType === "password" &&
              resolvedCredentials.password
            ) {
              config.password = resolvedCredentials.password;
            } else if (
              resolvedCredentials.authType === "key" &&
              resolvedCredentials.sshKey
            ) {
              // Normalize line endings so ssh2 can parse keys pasted from
              // Windows clients.
              const cleanKey = resolvedCredentials.sshKey
                .trim()
                .replace(/\r\n/g, "\n")
                .replace(/\r/g, "\n");
              config.privateKey = Buffer.from(cleanKey, "utf8");
              if (resolvedCredentials.keyPassword) {
                config.passphrase = resolvedCredentials.keyPassword;
              }
            }
            // Route through the jump-host chain when one is configured; the
            // forwarded channel becomes ssh2's transport socket.
            if (hostConfig.jumpHosts && hostConfig.jumpHosts.length > 0) {
              const jumpClient = await createJumpHostChain(
                hostConfig.jumpHosts,
                userId,
              );
              if (jumpClient) {
                const stream = await new Promise<any>((resolve, reject) => {
                  jumpClient.forwardOut(
                    "127.0.0.1",
                    0,
                    hostConfig.ip,
                    hostConfig.port || 22,
                    (err, stream) => {
                      if (err) return reject(err);
                      resolve(stream);
                    },
                  );
                });
                config.sock = stream;
              }
            }
            await new Promise<void>((resolve, reject) => {
              client.on("ready", () => resolve());
              client.on("error", reject);
              client.connect(config);
            });
            sshSession = {
              client,
              stream: null,
              isConnected: true,
              containerId,
            };
            activeSessions.set(sessionId, sshSession);
            // Validate the requested shell inside the container; fall back
            // to auto-detection when it is missing.
            let shellToUse = shell || "bash";
            if (shell) {
              try {
                await new Promise<void>((resolve, reject) => {
                  client.exec(
                    `docker exec ${containerId} which ${shell}`,
                    (err, stream) => {
                      if (err) return reject(err);
                      let output = "";
                      stream.on("data", (data: Buffer) => {
                        output += data.toString();
                      });
                      stream.on("close", (code: number) => {
                        if (code === 0 && output.trim()) {
                          resolve();
                        } else {
                          reject(new Error(`Shell ${shell} not available`));
                        }
                      });
                      stream.stderr.on("data", () => {
                        // Ignore stderr
                      });
                    },
                  );
                });
              } catch {
                dockerConsoleLogger.warn(
                  `Requested shell ${shell} not found, detecting available shell`,
                  {
                    operation: "shell_validation",
                    sessionId,
                    containerId,
                    requestedShell: shell,
                  },
                );
                shellToUse = await detectShell(sshSession, containerId);
              }
            } else {
              shellToUse = await detectShell(sshSession, containerId);
            }
            sshSession.shell = shellToUse;
            // NOTE(review): containerId and shellToUse are interpolated into
            // a host shell command — confirm they are validated upstream.
            const execCommand = `docker exec -it ${containerId} /bin/${shellToUse}`;
            client.exec(
              execCommand,
              {
                pty: {
                  term: "xterm-256color",
                  cols: cols || 80,
                  rows: rows || 24,
                },
              },
              (err, stream) => {
                if (err) {
                  dockerConsoleLogger.error(
                    "Failed to create docker exec",
                    err,
                    {
                      operation: "docker_exec",
                      sessionId,
                      containerId,
                    },
                  );
                  ws.send(
                    JSON.stringify({
                      type: "error",
                      message: `Failed to start console: ${err.message}`,
                    }),
                  );
                  return;
                }
                sshSession!.stream = stream;
                // Relay container output to the browser.
                stream.on("data", (data: Buffer) => {
                  if (ws.readyState === WebSocket.OPEN) {
                    ws.send(
                      JSON.stringify({
                        type: "output",
                        data: data.toString("utf8"),
                      }),
                    );
                  }
                });
                // PTY output arrives on stdout; stderr is deliberately
                // discarded here.
                stream.stderr.on("data", (data: Buffer) => {});
                stream.on("close", () => {
                  if (ws.readyState === WebSocket.OPEN) {
                    ws.send(
                      JSON.stringify({
                        type: "disconnected",
                        message: "Console session ended",
                      }),
                    );
                  }
                  if (sshSession) {
                    sshSession.client.end();
                    activeSessions.delete(sessionId);
                  }
                });
                // Tell the client which shell was actually started.
                ws.send(
                  JSON.stringify({
                    type: "connected",
                    data: {
                      shell: shellToUse,
                      requestedShell: shell,
                      shellChanged: shell && shell !== shellToUse,
                    },
                  }),
                );
              },
            );
          } catch (error) {
            dockerConsoleLogger.error("Failed to connect to container", error, {
              operation: "console_connect",
              sessionId,
              containerId: message.data.containerId,
            });
            ws.send(
              JSON.stringify({
                type: "error",
                message:
                  error instanceof Error
                    ? error.message
                    : "Failed to connect to container",
              }),
            );
          }
          break;
        }
        // "input": forward keystrokes to the container PTY.
        case "input": {
          if (sshSession && sshSession.stream) {
            sshSession.stream.write(message.data);
          }
          break;
        }
        // "resize": propagate the browser terminal's dimensions to the PTY.
        case "resize": {
          if (sshSession && sshSession.stream) {
            const { cols, rows } = message.data;
            sshSession.stream.setWindow(rows, cols);
          }
          break;
        }
        // "disconnect": tear the session down at the client's request.
        case "disconnect": {
          if (sshSession) {
            if (sshSession.stream) {
              sshSession.stream.end();
            }
            sshSession.client.end();
            activeSessions.delete(sessionId);
            ws.send(
              JSON.stringify({
                type: "disconnected",
                message: "Disconnected from container",
              }),
            );
          }
          break;
        }
        // "ping": application-level keepalive.
        case "ping": {
          if (ws.readyState === WebSocket.OPEN) {
            ws.send(JSON.stringify({ type: "pong" }));
          }
          break;
        }
        default:
          dockerConsoleLogger.warn("Unknown message type", {
            operation: "ws_message",
            type: message.type,
          });
      }
    } catch (error) {
      // Covers JSON parse failures and unexpected handler errors.
      dockerConsoleLogger.error("WebSocket message error", error, {
        operation: "ws_message",
        sessionId,
      });
      ws.send(
        JSON.stringify({
          type: "error",
          message: error instanceof Error ? error.message : "An error occurred",
        }),
      );
    }
  });
  // Browser closed the socket: clean up the SSH side.
  ws.on("close", () => {
    if (sshSession) {
      if (sshSession.stream) {
        sshSession.stream.end();
      }
      sshSession.client.end();
      activeSessions.delete(sessionId);
    }
  });
  // Transport error: log it and clean up the SSH side.
  ws.on("error", (error) => {
    dockerConsoleLogger.error("WebSocket error", error, {
      operation: "ws_error",
      sessionId,
    });
    if (sshSession) {
      if (sshSession.stream) {
        sshSession.stream.end();
      }
      sshSession.client.end();
      activeSessions.delete(sessionId);
    }
  });
});
// Graceful shutdown: close every live console session, then stop the
// WebSocket server and exit once it has finished closing.
process.on("SIGTERM", () => {
  for (const session of activeSessions.values()) {
    session.stream?.end();
    session.client.end();
  }
  activeSessions.clear();
  wss.close(() => {
    process.exit(0);
  });
});

1904
src/backend/ssh/docker.ts Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -10,6 +10,7 @@ import { fileLogger, sshLogger } from "../utils/logger.js";
import { SimpleDBOps } from "../utils/simple-db-ops.js";
import { AuthManager } from "../utils/auth-manager.js";
import type { AuthenticatedRequest } from "../../types/index.js";
import { createSocks5Connection } from "../utils/socks5-helper.js";
function isExecutableFile(permissions: string, fileName: string): boolean {
const hasExecutePermission =
@@ -278,6 +279,7 @@ interface PendingTOTPSession {
prompts?: Array<{ prompt: string; echo: boolean }>;
totpPromptIndex?: number;
resolvedPassword?: string;
totpAttempts: number;
}
const sshSessions: Record<string, SSHSession> = {};
@@ -356,6 +358,12 @@ app.post("/ssh/file_manager/ssh/connect", async (req, res) => {
userProvidedPassword,
forceKeyboardInteractive,
jumpHosts,
useSocks5,
socks5Host,
socks5Port,
socks5Username,
socks5Password,
socks5ProxyChain,
} = req.body;
const userId = (req as AuthenticatedRequest).userId;
@@ -382,6 +390,15 @@ app.post("/ssh/file_manager/ssh/connect", async (req, res) => {
if (sshSessions[sessionId]?.isConnected) {
cleanupSession(sessionId);
}
// Clean up any stale pending TOTP sessions
if (pendingTOTPSessions[sessionId]) {
try {
pendingTOTPSessions[sessionId].client.end();
} catch {}
delete pendingTOTPSessions[sessionId];
}
const client = new SSHClient();
let resolvedCredentials = { password, sshKey, keyPassword, authType };
@@ -545,9 +562,7 @@ app.post("/ssh/file_manager/ssh/connect", async (req, res) => {
.json({ error: "Password required for password authentication" });
}
if (!forceKeyboardInteractive) {
config.password = resolvedCredentials.password;
}
config.password = resolvedCredentials.password;
} else if (resolvedCredentials.authType === "none") {
} else {
fileLogger.warn(
@@ -713,6 +728,7 @@ app.post("/ssh/file_manager/ssh/connect", async (req, res) => {
prompts,
totpPromptIndex,
resolvedPassword: resolvedCredentials.password,
totpAttempts: 0,
};
res.json({
@@ -785,6 +801,7 @@ app.post("/ssh/file_manager/ssh/connect", async (req, res) => {
prompts,
totpPromptIndex: passwordPromptIndex,
resolvedPassword: resolvedCredentials.password,
totpAttempts: 0,
};
res.json({
@@ -808,7 +825,47 @@ app.post("/ssh/file_manager/ssh/connect", async (req, res) => {
},
);
if (jumpHosts && jumpHosts.length > 0 && userId) {
if (
useSocks5 &&
(socks5Host || (socks5ProxyChain && (socks5ProxyChain as any).length > 0))
) {
try {
const socks5Socket = await createSocks5Connection(ip, port, {
useSocks5,
socks5Host,
socks5Port,
socks5Username,
socks5Password,
socks5ProxyChain: socks5ProxyChain as any,
});
if (socks5Socket) {
config.sock = socks5Socket;
client.connect(config);
return;
} else {
fileLogger.error("SOCKS5 socket is null for SFTP", undefined, {
operation: "sftp_socks5_socket_null",
sessionId,
});
}
} catch (socks5Error) {
fileLogger.error("SOCKS5 connection failed", socks5Error, {
operation: "socks5_connect",
sessionId,
hostId,
proxyHost: socks5Host,
proxyPort: socks5Port || 1080,
});
return res.status(500).json({
error:
"SOCKS5 proxy connection failed: " +
(socks5Error instanceof Error
? socks5Error.message
: "Unknown error"),
});
}
} else if (jumpHosts && jumpHosts.length > 0 && userId) {
try {
const jumpClient = await createJumpHostChain(jumpHosts, userId);
@@ -891,9 +948,7 @@ app.post("/ssh/file_manager/ssh/connect-totp", async (req, res) => {
delete pendingTOTPSessions[sessionId];
try {
session.client.end();
} catch (error) {
sshLogger.debug("Operation failed, continuing", { error });
}
} catch (error) {}
fileLogger.warn("TOTP session timeout before code submission", {
operation: "file_totp_verify",
sessionId,
@@ -1385,7 +1440,7 @@ app.post("/ssh/file_manager/ssh/writeFile", async (req, res) => {
let fileBuffer;
try {
if (typeof content === "string") {
fileBuffer = Buffer.from(content, "utf8");
fileBuffer = Buffer.from(content, "base64");
} else if (Buffer.isBuffer(content)) {
fileBuffer = content;
} else {
@@ -1461,7 +1516,22 @@ app.post("/ssh/file_manager/ssh/writeFile", async (req, res) => {
const tryFallbackMethod = () => {
try {
const base64Content = Buffer.from(content, "utf8").toString("base64");
let contentBuffer: Buffer;
if (typeof content === "string") {
try {
contentBuffer = Buffer.from(content, "base64");
if (contentBuffer.toString("base64") !== content) {
contentBuffer = Buffer.from(content, "utf8");
}
} catch {
contentBuffer = Buffer.from(content, "utf8");
}
} else if (Buffer.isBuffer(content)) {
contentBuffer = content;
} else {
contentBuffer = Buffer.from(content);
}
const base64Content = contentBuffer.toString("base64");
const escapedPath = filePath.replace(/'/g, "'\"'\"'");
const writeCommand = `echo '${base64Content}' | base64 -d > '${escapedPath}' && echo "SUCCESS"`;
@@ -1579,7 +1649,7 @@ app.post("/ssh/file_manager/ssh/uploadFile", async (req, res) => {
let fileBuffer;
try {
if (typeof content === "string") {
fileBuffer = Buffer.from(content, "utf8");
fileBuffer = Buffer.from(content, "base64");
} else if (Buffer.isBuffer(content)) {
fileBuffer = content;
} else {
@@ -1662,7 +1732,22 @@ app.post("/ssh/file_manager/ssh/uploadFile", async (req, res) => {
const tryFallbackMethod = () => {
try {
const base64Content = Buffer.from(content, "utf8").toString("base64");
let contentBuffer: Buffer;
if (typeof content === "string") {
try {
contentBuffer = Buffer.from(content, "base64");
if (contentBuffer.toString("base64") !== content) {
contentBuffer = Buffer.from(content, "utf8");
}
} catch {
contentBuffer = Buffer.from(content, "utf8");
}
} else if (Buffer.isBuffer(content)) {
contentBuffer = content;
} else {
contentBuffer = Buffer.from(content);
}
const base64Content = contentBuffer.toString("base64");
const chunkSize = 1000000;
const chunks = [];
@@ -2940,21 +3025,10 @@ app.post("/ssh/file_manager/ssh/extractArchive", async (req, res) => {
let errorOutput = "";
stream.on("data", (data: Buffer) => {
fileLogger.debug("Extract stdout", {
operation: "extract_archive",
sessionId,
output: data.toString(),
});
});
stream.on("data", (data: Buffer) => {});
stream.stderr.on("data", (data: Buffer) => {
errorOutput += data.toString();
fileLogger.debug("Extract stderr", {
operation: "extract_archive",
sessionId,
error: data.toString(),
});
});
stream.on("close", (code: number) => {
@@ -3132,21 +3206,10 @@ app.post("/ssh/file_manager/ssh/compressFiles", async (req, res) => {
let errorOutput = "";
stream.on("data", (data: Buffer) => {
fileLogger.debug("Compress stdout", {
operation: "compress_files",
sessionId,
output: data.toString(),
});
});
stream.on("data", (data: Buffer) => {});
stream.stderr.on("data", (data: Buffer) => {
errorOutput += data.toString();
fileLogger.debug("Compress stderr", {
operation: "compress_files",
sessionId,
error: data.toString(),
});
});
stream.on("close", (code: number) => {

File diff suppressed because it is too large Load Diff

View File

@@ -14,6 +14,7 @@ import { sshLogger } from "../utils/logger.js";
import { SimpleDBOps } from "../utils/simple-db-ops.js";
import { AuthManager } from "../utils/auth-manager.js";
import { UserCrypto } from "../utils/user-crypto.js";
import { createSocks5Connection } from "../utils/socks5-helper.js";
interface ConnectToHostData {
cols: number;
@@ -32,6 +33,12 @@ interface ConnectToHostData {
userId?: string;
forceKeyboardInteractive?: boolean;
jumpHosts?: Array<{ hostId: number }>;
useSocks5?: boolean;
socks5Host?: string;
socks5Port?: number;
socks5Username?: string;
socks5Password?: string;
socks5ProxyChain?: unknown;
};
initialPath?: string;
executeCommand?: string;
@@ -130,10 +137,12 @@ async function createJumpHostChain(
const clients: Client[] = [];
try {
for (let i = 0; i < jumpHosts.length; i++) {
const jumpHostConfig = await resolveJumpHost(jumpHosts[i].hostId, userId);
const jumpHostConfigs = await Promise.all(
jumpHosts.map((jh) => resolveJumpHost(jh.hostId, userId)),
);
if (!jumpHostConfig) {
for (let i = 0; i < jumpHostConfigs.length; i++) {
if (!jumpHostConfigs[i]) {
sshLogger.error(`Jump host ${i + 1} not found`, undefined, {
operation: "jump_host_chain",
hostId: jumpHosts[i].hostId,
@@ -141,6 +150,10 @@ async function createJumpHostChain(
clients.forEach((c) => c.end());
return null;
}
}
for (let i = 0; i < jumpHostConfigs.length; i++) {
const jumpHostConfig = jumpHostConfigs[i];
const jumpClient = new Client();
clients.push(jumpClient);
@@ -316,9 +329,10 @@ wss.on("connection", async (ws: WebSocket, req) => {
let sshConn: Client | null = null;
let sshStream: ClientChannel | null = null;
let pingInterval: NodeJS.Timeout | null = null;
let keyboardInteractiveFinish: ((responses: string[]) => void) | null = null;
let totpPromptSent = false;
let totpAttempts = 0;
let totpTimeout: NodeJS.Timeout | null = null;
let isKeyboardInteractive = false;
let keyboardInteractiveResponded = false;
let isConnecting = false;
@@ -435,9 +449,15 @@ wss.on("connection", async (ws: WebSocket, req) => {
case "totp_response": {
const totpData = data as TOTPResponseData;
if (keyboardInteractiveFinish && totpData?.code) {
if (totpTimeout) {
clearTimeout(totpTimeout);
totpTimeout = null;
}
const totpCode = totpData.code;
totpAttempts++;
keyboardInteractiveFinish([totpCode]);
keyboardInteractiveFinish = null;
totpPromptSent = false;
} else {
sshLogger.warn("TOTP response received but no callback available", {
operation: "totp_response_error",
@@ -458,6 +478,10 @@ wss.on("connection", async (ws: WebSocket, req) => {
case "password_response": {
const passwordData = data as TOTPResponseData;
if (keyboardInteractiveFinish && passwordData?.code) {
if (totpTimeout) {
clearTimeout(totpTimeout);
totpTimeout = null;
}
const password = passwordData.code;
keyboardInteractiveFinish([password]);
keyboardInteractiveFinish = null;
@@ -597,6 +621,13 @@ wss.on("connection", async (ws: WebSocket, req) => {
isConnecting,
isConnected,
});
ws.send(
JSON.stringify({
type: "error",
message: "Connection already in progress",
code: "DUPLICATE_CONNECTION",
}),
);
return;
}
@@ -617,7 +648,7 @@ wss.on("connection", async (ws: WebSocket, req) => {
);
cleanupSSH(connectionTimeout);
}
}, 120000);
}, 30000);
let resolvedCredentials = { password, key, keyPassword, keyType, authType };
let authMethodNotAvailable = false;
@@ -802,8 +833,6 @@ wss.on("connection", async (ws: WebSocket, req) => {
);
});
setupPingInterval();
if (initialPath && initialPath.trim() !== "") {
const cdCommand = `cd "${initialPath.replace(/"/g, '\\"')}" && pwd\n`;
stream.write(cdCommand);
@@ -987,6 +1016,25 @@ wss.on("connection", async (ws: WebSocket, req) => {
finish(responses);
};
totpTimeout = setTimeout(() => {
if (keyboardInteractiveFinish) {
keyboardInteractiveFinish = null;
totpPromptSent = false;
sshLogger.warn("TOTP prompt timeout", {
operation: "totp_timeout",
hostId: id,
});
ws.send(
JSON.stringify({
type: "error",
message: "TOTP verification timeout. Please reconnect.",
}),
);
cleanupSSH(connectionTimeout);
}
}, 180000);
ws.send(
JSON.stringify({
type: "totp_required",
@@ -1021,6 +1069,24 @@ wss.on("connection", async (ws: WebSocket, req) => {
finish(responses);
};
totpTimeout = setTimeout(() => {
if (keyboardInteractiveFinish) {
keyboardInteractiveFinish = null;
keyboardInteractiveResponded = false;
sshLogger.warn("Password prompt timeout", {
operation: "password_timeout",
hostId: id,
});
ws.send(
JSON.stringify({
type: "error",
message: "Password verification timeout. Please reconnect.",
}),
);
cleanupSSH(connectionTimeout);
}
}, 180000);
ws.send(
JSON.stringify({
type: "password_required",
@@ -1049,10 +1115,10 @@ wss.on("connection", async (ws: WebSocket, req) => {
tryKeyboard: true,
keepaliveInterval: 30000,
keepaliveCountMax: 3,
readyTimeout: 120000,
readyTimeout: 30000,
tcpKeepAlive: true,
tcpKeepAliveInitialDelay: 30000,
timeout: 120000,
timeout: 30000,
env: {
TERM: "xterm-256color",
LANG: "en_US.UTF-8",
@@ -1128,9 +1194,7 @@ wss.on("connection", async (ws: WebSocket, req) => {
return;
}
if (!hostConfig.forceKeyboardInteractive) {
connectConfig.password = resolvedCredentials.password;
}
connectConfig.password = resolvedCredentials.password;
} else if (
resolvedCredentials.authType === "key" &&
resolvedCredentials.key
@@ -1183,6 +1247,49 @@ wss.on("connection", async (ws: WebSocket, req) => {
return;
}
if (
hostConfig.useSocks5 &&
(hostConfig.socks5Host ||
(hostConfig.socks5ProxyChain &&
(hostConfig.socks5ProxyChain as any).length > 0))
) {
try {
const socks5Socket = await createSocks5Connection(ip, port, {
useSocks5: hostConfig.useSocks5,
socks5Host: hostConfig.socks5Host,
socks5Port: hostConfig.socks5Port,
socks5Username: hostConfig.socks5Username,
socks5Password: hostConfig.socks5Password,
socks5ProxyChain: hostConfig.socks5ProxyChain as any,
});
if (socks5Socket) {
connectConfig.sock = socks5Socket;
sshConn.connect(connectConfig);
return;
}
} catch (socks5Error) {
sshLogger.error("SOCKS5 connection failed", socks5Error, {
operation: "socks5_connect",
hostId: id,
proxyHost: hostConfig.socks5Host,
proxyPort: hostConfig.socks5Port || 1080,
});
ws.send(
JSON.stringify({
type: "error",
message:
"SOCKS5 proxy connection failed: " +
(socks5Error instanceof Error
? socks5Error.message
: "Unknown error"),
}),
);
cleanupSSH(connectionTimeout);
return;
}
}
if (
hostConfig.jumpHosts &&
hostConfig.jumpHosts.length > 0 &&
@@ -1279,9 +1386,9 @@ wss.on("connection", async (ws: WebSocket, req) => {
clearTimeout(timeoutId);
}
if (pingInterval) {
clearInterval(pingInterval);
pingInterval = null;
if (totpTimeout) {
clearTimeout(totpTimeout);
totpTimeout = null;
}
if (sshStream) {
@@ -1309,35 +1416,21 @@ wss.on("connection", async (ws: WebSocket, req) => {
}
totpPromptSent = false;
totpAttempts = 0;
isKeyboardInteractive = false;
keyboardInteractiveResponded = false;
keyboardInteractiveFinish = null;
isConnecting = false;
isConnected = false;
setTimeout(() => {
isCleaningUp = false;
}, 100);
isCleaningUp = false;
}
function setupPingInterval() {
pingInterval = setInterval(() => {
if (sshConn && sshStream) {
try {
sshStream.write("\x00");
} catch (e: unknown) {
sshLogger.error(
"SSH keepalive failed: " +
(e instanceof Error ? e.message : "Unknown error"),
);
cleanupSSH();
}
} else if (!sshConn || !sshStream) {
if (pingInterval) {
clearInterval(pingInterval);
pingInterval = null;
}
}
}, 30000);
}
// Note: PTY-level keepalive (writing \x00 to the stream) was removed.
// It was causing ^@ characters to appear in terminals with echoctl enabled.
// SSH-level keepalive is configured via connectConfig (keepaliveInterval,
// keepaliveCountMax, tcpKeepAlive), which handles connection health monitoring
// without producing visible output on the terminal.
//
// See: https://github.com/Termix-SSH/Support/issues/232
// See: https://github.com/Termix-SSH/Support/issues/309
});

View File

@@ -1,4 +1,4 @@
import express from "express";
import express, { type Response } from "express";
import cors from "cors";
import cookieParser from "cookie-parser";
import { Client } from "ssh2";
@@ -13,12 +13,16 @@ import type {
TunnelStatus,
VerificationData,
ErrorType,
AuthenticatedRequest,
} from "../../types/index.js";
import { CONNECTION_STATES } from "../../types/index.js";
import { tunnelLogger, sshLogger } from "../utils/logger.js";
import { SystemCrypto } from "../utils/system-crypto.js";
import { SimpleDBOps } from "../utils/simple-db-ops.js";
import { DataCrypto } from "../utils/data-crypto.js";
import { createSocks5Connection } from "../utils/socks5-helper.js";
import { AuthManager } from "../utils/auth-manager.js";
import { PermissionManager } from "../utils/permission-manager.js";
const app = express();
app.use(
@@ -63,6 +67,10 @@ app.use(
app.use(cookieParser());
app.use(express.json());
const authManager = AuthManager.getInstance();
const permissionManager = PermissionManager.getInstance();
const authenticateJWT = authManager.createAuthMiddleware();
const activeTunnels = new Map<string, Client>();
const retryCounters = new Map<string, number>();
const connectionStatus = new Map<string, TunnelStatus>();
@@ -77,6 +85,7 @@ const tunnelConnecting = new Set<string>();
const tunnelConfigs = new Map<string, TunnelConfig>();
const activeTunnelProcesses = new Map<string, ChildProcess>();
const pendingTunnelOperations = new Map<string, Promise<void>>();
function broadcastTunnelStatus(tunnelName: string, status: TunnelStatus): void {
if (
@@ -154,10 +163,75 @@ function getTunnelMarker(tunnelName: string) {
return `TUNNEL_MARKER_${tunnelName.replace(/[^a-zA-Z0-9]/g, "_")}`;
}
function cleanupTunnelResources(
/**
 * Builds the canonical tunnel identifier from its structured parts.
 *
 * The resulting name is "::"-delimited in a fixed order so it can be
 * unambiguously parsed back by parseTunnelName.
 */
function normalizeTunnelName(
  hostId: number,
  tunnelIndex: number,
  displayName: string,
  sourcePort: number,
  endpointHost: string,
  endpointPort: number,
): string {
  const segments = [
    hostId,
    tunnelIndex,
    displayName,
    sourcePort,
    endpointHost,
    endpointPort,
  ];
  return segments.join("::");
}
/**
 * Parses a tunnel name back into its structured parts.
 *
 * Modern names are the 6-segment "::"-delimited format produced by
 * normalizeTunnelName. Anything else is treated as a legacy "_"-delimited
 * name, where only the trailing three segments (sourcePort, endpointHost,
 * endpointPort) are positionally recoverable; hostId/tunnelIndex are then
 * undefined and isLegacyFormat is true.
 */
function parseTunnelName(tunnelName: string): {
  hostId?: number;
  tunnelIndex?: number;
  displayName: string;
  sourcePort: string;
  endpointHost: string;
  endpointPort: string;
  isLegacyFormat: boolean;
} {
  const parts = tunnelName.split("::");
  if (parts.length === 6) {
    return {
      // Explicit radix 10 avoids accidental hex/octal interpretation
      // of unusual inputs (parseInt without a radix is a known footgun).
      hostId: parseInt(parts[0], 10),
      tunnelIndex: parseInt(parts[1], 10),
      displayName: parts[2],
      sourcePort: parts[3],
      endpointHost: parts[4],
      endpointPort: parts[5],
      isLegacyFormat: false,
    };
  }
  tunnelLogger.warn(`Legacy tunnel name format: ${tunnelName}`);
  const legacyParts = tunnelName.split("_");
  return {
    displayName: legacyParts[0] || "unknown",
    sourcePort: legacyParts[legacyParts.length - 3] || "0",
    endpointHost: legacyParts[legacyParts.length - 2] || "unknown",
    endpointPort: legacyParts[legacyParts.length - 1] || "0",
    isLegacyFormat: true,
  };
}
/**
 * Cross-checks that the structured fields embedded in a modern tunnel name
 * agree with the supplied tunnel configuration.
 *
 * Legacy-format names carry no reliable structured fields, so they are
 * accepted unconditionally.
 */
function validateTunnelConfig(
  tunnelName: string,
  tunnelConfig: TunnelConfig,
): boolean {
  const parsed = parseTunnelName(tunnelName);
  if (parsed.isLegacyFormat) {
    return true;
  }
  const hostMatches = parsed.hostId === tunnelConfig.sourceHostId;
  const indexMatches = parsed.tunnelIndex === tunnelConfig.tunnelIndex;
  const sourcePortMatches =
    String(parsed.sourcePort) === String(tunnelConfig.sourcePort);
  const endpointMatches =
    parsed.endpointHost === tunnelConfig.endpointHost &&
    String(parsed.endpointPort) === String(tunnelConfig.endpointPort);
  return hostMatches && indexMatches && sourcePortMatches && endpointMatches;
}
async function cleanupTunnelResources(
tunnelName: string,
forceCleanup = false,
): void {
): Promise<void> {
if (cleanupInProgress.has(tunnelName)) {
return;
}
@@ -170,13 +244,16 @@ function cleanupTunnelResources(
const tunnelConfig = tunnelConfigs.get(tunnelName);
if (tunnelConfig) {
killRemoteTunnelByMarker(tunnelConfig, tunnelName, (err) => {
cleanupInProgress.delete(tunnelName);
if (err) {
tunnelLogger.error(
`Failed to kill remote tunnel for '${tunnelName}': ${err.message}`,
);
}
await new Promise<void>((resolve) => {
killRemoteTunnelByMarker(tunnelConfig, tunnelName, (err) => {
cleanupInProgress.delete(tunnelName);
if (err) {
tunnelLogger.error(
`Failed to kill remote tunnel for '${tunnelName}': ${err.message}`,
);
}
resolve();
});
});
} else {
cleanupInProgress.delete(tunnelName);
@@ -272,11 +349,11 @@ function resetRetryState(tunnelName: string): void {
});
}
function handleDisconnect(
async function handleDisconnect(
tunnelName: string,
tunnelConfig: TunnelConfig | null,
shouldRetry = true,
): void {
): Promise<void> {
if (tunnelVerifications.has(tunnelName)) {
try {
const verification = tunnelVerifications.get(tunnelName);
@@ -286,7 +363,11 @@ function handleDisconnect(
tunnelVerifications.delete(tunnelName);
}
cleanupTunnelResources(tunnelName);
while (cleanupInProgress.has(tunnelName)) {
await new Promise((resolve) => setTimeout(resolve, 100));
}
await cleanupTunnelResources(tunnelName);
if (manualDisconnects.has(tunnelName)) {
resetRetryState(tunnelName);
@@ -490,43 +571,76 @@ async function connectSSHTunnel(
authMethod: tunnelConfig.sourceAuthMethod,
};
if (tunnelConfig.sourceCredentialId && tunnelConfig.sourceUserId) {
try {
const userDataKey = DataCrypto.getUserDataKey(tunnelConfig.sourceUserId);
if (userDataKey) {
const credentials = await SimpleDBOps.select(
getDb()
.select()
.from(sshCredentials)
.where(
and(
eq(sshCredentials.id, tunnelConfig.sourceCredentialId),
eq(sshCredentials.userId, tunnelConfig.sourceUserId),
),
),
"ssh_credentials",
tunnelConfig.sourceUserId,
);
const effectiveUserId =
tunnelConfig.requestingUserId || tunnelConfig.sourceUserId;
if (credentials.length > 0) {
const credential = credentials[0];
resolvedSourceCredentials = {
password: credential.password as string | undefined,
sshKey: (credential.private_key ||
credential.privateKey ||
credential.key) as string | undefined,
keyPassword: (credential.key_password || credential.keyPassword) as
| string
| undefined,
keyType: (credential.key_type || credential.keyType) as
| string
| undefined,
authMethod: (credential.auth_type || credential.authType) as string,
};
if (tunnelConfig.sourceCredentialId && effectiveUserId) {
try {
if (
tunnelConfig.requestingUserId &&
tunnelConfig.requestingUserId !== tunnelConfig.sourceUserId
) {
const { SharedCredentialManager } =
await import("../utils/shared-credential-manager.js");
const sharedCredManager = SharedCredentialManager.getInstance();
if (tunnelConfig.sourceHostId) {
const sharedCred = await sharedCredManager.getSharedCredentialForUser(
tunnelConfig.sourceHostId,
tunnelConfig.requestingUserId,
);
if (sharedCred) {
resolvedSourceCredentials = {
password: sharedCred.password,
sshKey: sharedCred.key,
keyPassword: sharedCred.keyPassword,
keyType: sharedCred.keyType,
authMethod: sharedCred.authType,
};
} else {
const errorMessage = `Cannot connect tunnel '${tunnelName}': shared credentials not available`;
tunnelLogger.error(errorMessage);
broadcastTunnelStatus(tunnelName, {
connected: false,
status: CONNECTION_STATES.FAILED,
reason: errorMessage,
});
return;
}
}
} else {
const userDataKey = DataCrypto.getUserDataKey(effectiveUserId);
if (userDataKey) {
const credentials = await SimpleDBOps.select(
getDb()
.select()
.from(sshCredentials)
.where(eq(sshCredentials.id, tunnelConfig.sourceCredentialId)),
"ssh_credentials",
effectiveUserId,
);
if (credentials.length > 0) {
const credential = credentials[0];
resolvedSourceCredentials = {
password: credential.password as string | undefined,
sshKey: (credential.private_key ||
credential.privateKey ||
credential.key) as string | undefined,
keyPassword: (credential.key_password ||
credential.keyPassword) as string | undefined,
keyType: (credential.key_type || credential.keyType) as
| string
| undefined,
authMethod: (credential.auth_type ||
credential.authType) as string,
};
}
}
}
} catch (error) {
tunnelLogger.warn("Failed to resolve source credentials from database", {
tunnelLogger.warn("Failed to resolve source credentials", {
operation: "tunnel_connect",
tunnelName,
credentialId: tunnelConfig.sourceCredentialId,
@@ -581,12 +695,7 @@ async function connectSSHTunnel(
getDb()
.select()
.from(sshCredentials)
.where(
and(
eq(sshCredentials.id, tunnelConfig.endpointCredentialId),
eq(sshCredentials.userId, tunnelConfig.endpointUserId),
),
),
.where(eq(sshCredentials.id, tunnelConfig.endpointCredentialId)),
"ssh_credentials",
tunnelConfig.endpointUserId,
);
@@ -1016,6 +1125,51 @@ async function connectSSHTunnel(
});
}
if (
tunnelConfig.useSocks5 &&
(tunnelConfig.socks5Host ||
(tunnelConfig.socks5ProxyChain &&
tunnelConfig.socks5ProxyChain.length > 0))
) {
try {
const socks5Socket = await createSocks5Connection(
tunnelConfig.sourceIP,
tunnelConfig.sourceSSHPort,
{
useSocks5: tunnelConfig.useSocks5,
socks5Host: tunnelConfig.socks5Host,
socks5Port: tunnelConfig.socks5Port,
socks5Username: tunnelConfig.socks5Username,
socks5Password: tunnelConfig.socks5Password,
socks5ProxyChain: tunnelConfig.socks5ProxyChain,
},
);
if (socks5Socket) {
connOptions.sock = socks5Socket;
conn.connect(connOptions);
return;
}
} catch (socks5Error) {
tunnelLogger.error("SOCKS5 connection failed for tunnel", socks5Error, {
operation: "socks5_connect",
tunnelName,
proxyHost: tunnelConfig.socks5Host,
proxyPort: tunnelConfig.socks5Port || 1080,
});
broadcastTunnelStatus(tunnelName, {
connected: false,
status: CONNECTION_STATES.FAILED,
reason:
"SOCKS5 proxy connection failed: " +
(socks5Error instanceof Error
? socks5Error.message
: "Unknown error"),
});
return;
}
}
conn.connect(connOptions);
}
@@ -1042,12 +1196,7 @@ async function killRemoteTunnelByMarker(
getDb()
.select()
.from(sshCredentials)
.where(
and(
eq(sshCredentials.id, tunnelConfig.sourceCredentialId),
eq(sshCredentials.userId, tunnelConfig.sourceUserId),
),
),
.where(eq(sshCredentials.id, tunnelConfig.sourceCredentialId)),
"ssh_credentials",
tunnelConfig.sourceUserId,
);
@@ -1248,7 +1397,57 @@ async function killRemoteTunnelByMarker(
callback(err);
});
conn.connect(connOptions);
if (
tunnelConfig.useSocks5 &&
(tunnelConfig.socks5Host ||
(tunnelConfig.socks5ProxyChain &&
tunnelConfig.socks5ProxyChain.length > 0))
) {
(async () => {
try {
const socks5Socket = await createSocks5Connection(
tunnelConfig.sourceIP,
tunnelConfig.sourceSSHPort,
{
useSocks5: tunnelConfig.useSocks5,
socks5Host: tunnelConfig.socks5Host,
socks5Port: tunnelConfig.socks5Port,
socks5Username: tunnelConfig.socks5Username,
socks5Password: tunnelConfig.socks5Password,
socks5ProxyChain: tunnelConfig.socks5ProxyChain,
},
);
if (socks5Socket) {
connOptions.sock = socks5Socket;
conn.connect(connOptions);
} else {
callback(new Error("Failed to create SOCKS5 connection"));
}
} catch (socks5Error) {
tunnelLogger.error(
"SOCKS5 connection failed for killing tunnel",
socks5Error,
{
operation: "socks5_connect_kill",
tunnelName,
proxyHost: tunnelConfig.socks5Host,
proxyPort: tunnelConfig.socks5Port || 1080,
},
);
callback(
new Error(
"SOCKS5 proxy connection failed: " +
(socks5Error instanceof Error
? socks5Error.message
: "Unknown error"),
),
);
}
})();
} else {
conn.connect(connOptions);
}
}
app.get("/ssh/tunnel/status", (req, res) => {
@@ -1266,103 +1465,291 @@ app.get("/ssh/tunnel/status/:tunnelName", (req, res) => {
res.json({ name: tunnelName, status });
});
app.post("/ssh/tunnel/connect", (req, res) => {
const tunnelConfig: TunnelConfig = req.body;
app.post(
"/ssh/tunnel/connect",
authenticateJWT,
async (req: AuthenticatedRequest, res: Response) => {
const tunnelConfig: TunnelConfig = req.body;
const userId = req.userId;
if (!tunnelConfig || !tunnelConfig.name) {
return res.status(400).json({ error: "Invalid tunnel configuration" });
}
if (!userId) {
return res.status(401).json({ error: "Authentication required" });
}
const tunnelName = tunnelConfig.name;
if (!tunnelConfig || !tunnelConfig.name) {
return res.status(400).json({ error: "Invalid tunnel configuration" });
}
cleanupTunnelResources(tunnelName);
const tunnelName = tunnelConfig.name;
manualDisconnects.delete(tunnelName);
retryCounters.delete(tunnelName);
retryExhaustedTunnels.delete(tunnelName);
try {
if (!validateTunnelConfig(tunnelName, tunnelConfig)) {
tunnelLogger.error(`Tunnel config validation failed`, {
operation: "tunnel_connect",
tunnelName,
configHostId: tunnelConfig.sourceHostId,
configTunnelIndex: tunnelConfig.tunnelIndex,
});
return res.status(400).json({
error: "Tunnel configuration does not match tunnel name",
});
}
tunnelConfigs.set(tunnelName, tunnelConfig);
if (tunnelConfig.sourceHostId) {
const accessInfo = await permissionManager.canAccessHost(
userId,
tunnelConfig.sourceHostId,
"read",
);
connectSSHTunnel(tunnelConfig, 0).catch((error) => {
tunnelLogger.error(
`Failed to connect tunnel ${tunnelConfig.name}: ${error instanceof Error ? error.message : "Unknown error"}`,
);
});
if (!accessInfo.hasAccess) {
tunnelLogger.warn("User attempted tunnel connect without access", {
operation: "tunnel_connect_unauthorized",
userId,
hostId: tunnelConfig.sourceHostId,
tunnelName,
});
return res.status(403).json({ error: "Access denied to this host" });
}
res.json({ message: "Connection request received", tunnelName });
});
if (accessInfo.isShared && !accessInfo.isOwner) {
tunnelConfig.requestingUserId = userId;
}
}
app.post("/ssh/tunnel/disconnect", (req, res) => {
const { tunnelName } = req.body;
if (pendingTunnelOperations.has(tunnelName)) {
try {
await pendingTunnelOperations.get(tunnelName);
} catch (error) {
tunnelLogger.warn(`Previous tunnel operation failed`, { tunnelName });
}
}
if (!tunnelName) {
return res.status(400).json({ error: "Tunnel name required" });
}
const operation = (async () => {
manualDisconnects.delete(tunnelName);
retryCounters.delete(tunnelName);
retryExhaustedTunnels.delete(tunnelName);
manualDisconnects.add(tunnelName);
retryCounters.delete(tunnelName);
retryExhaustedTunnels.delete(tunnelName);
await cleanupTunnelResources(tunnelName);
if (activeRetryTimers.has(tunnelName)) {
clearTimeout(activeRetryTimers.get(tunnelName)!);
activeRetryTimers.delete(tunnelName);
}
if (tunnelConfigs.has(tunnelName)) {
const existingConfig = tunnelConfigs.get(tunnelName);
if (
existingConfig &&
(existingConfig.sourceHostId !== tunnelConfig.sourceHostId ||
existingConfig.tunnelIndex !== tunnelConfig.tunnelIndex)
) {
throw new Error(`Tunnel name collision detected: ${tunnelName}`);
}
}
cleanupTunnelResources(tunnelName, true);
if (!tunnelConfig.endpointIP || !tunnelConfig.endpointUsername) {
try {
const systemCrypto = SystemCrypto.getInstance();
const internalAuthToken = await systemCrypto.getInternalAuthToken();
broadcastTunnelStatus(tunnelName, {
connected: false,
status: CONNECTION_STATES.DISCONNECTED,
manualDisconnect: true,
});
const allHostsResponse = await axios.get(
"http://localhost:30001/ssh/db/host/internal/all",
{
headers: {
"Content-Type": "application/json",
"X-Internal-Auth-Token": internalAuthToken,
},
},
);
const tunnelConfig = tunnelConfigs.get(tunnelName) || null;
handleDisconnect(tunnelName, tunnelConfig, false);
const allHosts: SSHHost[] = allHostsResponse.data || [];
const endpointHost = allHosts.find(
(h) =>
h.name === tunnelConfig.endpointHost ||
`${h.username}@${h.ip}` === tunnelConfig.endpointHost,
);
setTimeout(() => {
manualDisconnects.delete(tunnelName);
}, 5000);
if (!endpointHost) {
throw new Error(
`Endpoint host '${tunnelConfig.endpointHost}' not found in database`,
);
}
res.json({ message: "Disconnect request received", tunnelName });
});
tunnelConfig.endpointIP = endpointHost.ip;
tunnelConfig.endpointSSHPort = endpointHost.port;
tunnelConfig.endpointUsername = endpointHost.username;
tunnelConfig.endpointPassword = endpointHost.password;
tunnelConfig.endpointAuthMethod = endpointHost.authType;
tunnelConfig.endpointSSHKey = endpointHost.key;
tunnelConfig.endpointKeyPassword = endpointHost.keyPassword;
tunnelConfig.endpointKeyType = endpointHost.keyType;
tunnelConfig.endpointCredentialId = endpointHost.credentialId;
tunnelConfig.endpointUserId = endpointHost.userId;
} catch (resolveError) {
tunnelLogger.error(
"Failed to resolve endpoint host",
resolveError,
{
operation: "tunnel_connect_resolve_endpoint_failed",
tunnelName,
endpointHost: tunnelConfig.endpointHost,
},
);
throw new Error(
`Failed to resolve endpoint host: ${resolveError instanceof Error ? resolveError.message : "Unknown error"}`,
);
}
}
app.post("/ssh/tunnel/cancel", (req, res) => {
const { tunnelName } = req.body;
tunnelConfigs.set(tunnelName, tunnelConfig);
await connectSSHTunnel(tunnelConfig, 0);
})();
if (!tunnelName) {
return res.status(400).json({ error: "Tunnel name required" });
}
pendingTunnelOperations.set(tunnelName, operation);
retryCounters.delete(tunnelName);
retryExhaustedTunnels.delete(tunnelName);
res.json({ message: "Connection request received", tunnelName });
if (activeRetryTimers.has(tunnelName)) {
clearTimeout(activeRetryTimers.get(tunnelName)!);
activeRetryTimers.delete(tunnelName);
}
operation.finally(() => {
pendingTunnelOperations.delete(tunnelName);
});
} catch (error) {
tunnelLogger.error("Failed to process tunnel connect", error, {
operation: "tunnel_connect",
tunnelName,
userId,
});
res.status(500).json({ error: "Failed to connect tunnel" });
}
},
);
if (countdownIntervals.has(tunnelName)) {
clearInterval(countdownIntervals.get(tunnelName)!);
countdownIntervals.delete(tunnelName);
}
app.post(
"/ssh/tunnel/disconnect",
authenticateJWT,
async (req: AuthenticatedRequest, res: Response) => {
const { tunnelName } = req.body;
const userId = req.userId;
cleanupTunnelResources(tunnelName, true);
if (!userId) {
return res.status(401).json({ error: "Authentication required" });
}
broadcastTunnelStatus(tunnelName, {
connected: false,
status: CONNECTION_STATES.DISCONNECTED,
manualDisconnect: true,
});
if (!tunnelName) {
return res.status(400).json({ error: "Tunnel name required" });
}
const tunnelConfig = tunnelConfigs.get(tunnelName) || null;
handleDisconnect(tunnelName, tunnelConfig, false);
try {
const config = tunnelConfigs.get(tunnelName);
if (config && config.sourceHostId) {
const accessInfo = await permissionManager.canAccessHost(
userId,
config.sourceHostId,
"read",
);
if (!accessInfo.hasAccess) {
return res.status(403).json({ error: "Access denied" });
}
}
setTimeout(() => {
manualDisconnects.delete(tunnelName);
}, 5000);
manualDisconnects.add(tunnelName);
retryCounters.delete(tunnelName);
retryExhaustedTunnels.delete(tunnelName);
res.json({ message: "Cancel request received", tunnelName });
});
if (activeRetryTimers.has(tunnelName)) {
clearTimeout(activeRetryTimers.get(tunnelName)!);
activeRetryTimers.delete(tunnelName);
}
await cleanupTunnelResources(tunnelName, true);
broadcastTunnelStatus(tunnelName, {
connected: false,
status: CONNECTION_STATES.DISCONNECTED,
manualDisconnect: true,
});
const tunnelConfig = tunnelConfigs.get(tunnelName) || null;
handleDisconnect(tunnelName, tunnelConfig, false);
setTimeout(() => {
manualDisconnects.delete(tunnelName);
}, 5000);
res.json({ message: "Disconnect request received", tunnelName });
} catch (error) {
tunnelLogger.error("Failed to disconnect tunnel", error, {
operation: "tunnel_disconnect",
tunnelName,
userId,
});
res.status(500).json({ error: "Failed to disconnect tunnel" });
}
},
);
app.post(
"/ssh/tunnel/cancel",
authenticateJWT,
async (req: AuthenticatedRequest, res: Response) => {
const { tunnelName } = req.body;
const userId = req.userId;
if (!userId) {
return res.status(401).json({ error: "Authentication required" });
}
if (!tunnelName) {
return res.status(400).json({ error: "Tunnel name required" });
}
try {
const config = tunnelConfigs.get(tunnelName);
if (config && config.sourceHostId) {
const accessInfo = await permissionManager.canAccessHost(
userId,
config.sourceHostId,
"read",
);
if (!accessInfo.hasAccess) {
return res.status(403).json({ error: "Access denied" });
}
}
retryCounters.delete(tunnelName);
retryExhaustedTunnels.delete(tunnelName);
if (activeRetryTimers.has(tunnelName)) {
clearTimeout(activeRetryTimers.get(tunnelName)!);
activeRetryTimers.delete(tunnelName);
}
if (countdownIntervals.has(tunnelName)) {
clearInterval(countdownIntervals.get(tunnelName)!);
countdownIntervals.delete(tunnelName);
}
await cleanupTunnelResources(tunnelName, true);
broadcastTunnelStatus(tunnelName, {
connected: false,
status: CONNECTION_STATES.DISCONNECTED,
manualDisconnect: true,
});
const tunnelConfig = tunnelConfigs.get(tunnelName) || null;
handleDisconnect(tunnelName, tunnelConfig, false);
setTimeout(() => {
manualDisconnects.delete(tunnelName);
}, 5000);
res.json({ message: "Cancel request received", tunnelName });
} catch (error) {
tunnelLogger.error("Failed to cancel tunnel retry", error, {
operation: "tunnel_cancel",
tunnelName,
userId,
});
res.status(500).json({ error: "Failed to cancel tunnel retry" });
}
},
);
async function initializeAutoStartTunnels(): Promise<void> {
try {
@@ -1408,12 +1795,19 @@ async function initializeAutoStartTunnels(): Promise<void> {
);
if (endpointHost) {
const tunnelIndex =
host.tunnelConnections.indexOf(tunnelConnection);
const tunnelConfig: TunnelConfig = {
name: `${host.name || `${host.username}@${host.ip}`}_${
tunnelConnection.sourcePort
}_${tunnelConnection.endpointHost}_${
tunnelConnection.endpointPort
}`,
name: normalizeTunnelName(
host.id,
tunnelIndex,
host.name || `${host.username}@${host.ip}`,
tunnelConnection.sourcePort,
tunnelConnection.endpointHost,
tunnelConnection.endpointPort,
),
sourceHostId: host.id,
tunnelIndex: tunnelIndex,
hostName: host.name || `${host.username}@${host.ip}`,
sourceIP: host.ip,
sourceSSHPort: host.port,
@@ -1429,6 +1823,7 @@ async function initializeAutoStartTunnels(): Promise<void> {
endpointIP: endpointHost.ip,
endpointSSHPort: endpointHost.port,
endpointUsername: endpointHost.username,
endpointHost: tunnelConnection.endpointHost,
endpointPassword:
tunnelConnection.endpointPassword ||
endpointHost.autostartPassword ||
@@ -1453,6 +1848,11 @@ async function initializeAutoStartTunnels(): Promise<void> {
retryInterval: tunnelConnection.retryInterval * 1000,
autoStart: tunnelConnection.autoStart,
isPinned: host.pin,
useSocks5: host.useSocks5,
socks5Host: host.socks5Host,
socks5Port: host.socks5Port,
socks5Username: host.socks5Username,
socks5Password: host.socks5Password,
};
autoStartTunnels.push(tunnelConfig);

View File

@@ -3,28 +3,87 @@ import type { Client } from "ssh2";
export function execCommand(
client: Client,
command: string,
timeoutMs = 30000,
): Promise<{
stdout: string;
stderr: string;
code: number | null;
}> {
return new Promise((resolve, reject) => {
client.exec(command, { pty: false }, (err, stream) => {
if (err) return reject(err);
let settled = false;
let stream: any = null;
const timeout = setTimeout(() => {
if (!settled) {
settled = true;
cleanup();
reject(new Error(`Command timeout after ${timeoutMs}ms: ${command}`));
}
}, timeoutMs);
const cleanup = () => {
clearTimeout(timeout);
if (stream) {
try {
stream.removeAllListeners();
if (stream.stderr) {
stream.stderr.removeAllListeners();
}
stream.destroy();
} catch (error) {
// Ignore cleanup errors
}
}
};
client.exec(command, { pty: false }, (err, _stream) => {
if (err) {
if (!settled) {
settled = true;
cleanup();
reject(err);
}
return;
}
stream = _stream;
let stdout = "";
let stderr = "";
let exitCode: number | null = null;
stream
.on("close", (code: number | undefined) => {
exitCode = typeof code === "number" ? code : null;
resolve({ stdout, stderr, code: exitCode });
if (!settled) {
settled = true;
exitCode = typeof code === "number" ? code : null;
cleanup();
resolve({ stdout, stderr, code: exitCode });
}
})
.on("data", (data: Buffer) => {
stdout += data.toString("utf8");
})
.stderr.on("data", (data: Buffer) => {
stderr += data.toString("utf8");
.on("error", (streamErr: Error) => {
if (!settled) {
settled = true;
cleanup();
reject(streamErr);
}
});
if (stream.stderr) {
stream.stderr
.on("data", (data: Buffer) => {
stderr += data.toString("utf8");
})
.on("error", (stderrErr: Error) => {
if (!settled) {
settled = true;
cleanup();
reject(stderrErr);
}
});
}
});
});
}

View File

@@ -26,12 +26,20 @@ export async function collectCpuMetrics(client: Client): Promise<{
let loadTriplet: [number, number, number] | null = null;
try {
const [stat1, loadAvgOut, coresOut] = await Promise.all([
execCommand(client, "cat /proc/stat"),
execCommand(client, "cat /proc/loadavg"),
execCommand(
client,
"nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo",
const [stat1, loadAvgOut, coresOut] = await Promise.race([
Promise.all([
execCommand(client, "cat /proc/stat"),
execCommand(client, "cat /proc/loadavg"),
execCommand(
client,
"nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo",
),
]),
new Promise<never>((_, reject) =>
setTimeout(
() => reject(new Error("CPU metrics collection timeout")),
25000,
),
),
]);

View File

@@ -1,5 +1,6 @@
import type { Client } from "ssh2";
import { execCommand } from "./common-utils.js";
import { statsLogger } from "../../utils/logger.js";
export interface LoginRecord {
user: string;
@@ -46,10 +47,20 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
const timeStr = parts.slice(timeStart, timeStart + 5).join(" ");
if (user && user !== "wtmp" && tty !== "system") {
let parsedTime: string;
try {
const date = new Date(timeStr);
parsedTime = isNaN(date.getTime())
? new Date().toISOString()
: date.toISOString();
} catch (e) {
parsedTime = new Date().toISOString();
}
recentLogins.push({
user,
ip,
time: new Date(timeStr).toISOString(),
time: parsedTime,
status: "success",
});
if (ip !== "local") {
@@ -59,9 +70,7 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
}
}
}
} catch (e) {
// Ignore errors
}
} catch (e) {}
try {
const failedOut = await execCommand(
@@ -96,12 +105,20 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
}
if (user && ip) {
let parsedTime: string;
try {
const date = timeStr ? new Date(timeStr) : new Date();
parsedTime = isNaN(date.getTime())
? new Date().toISOString()
: date.toISOString();
} catch (e) {
parsedTime = new Date().toISOString();
}
failedLogins.push({
user,
ip,
time: timeStr
? new Date(timeStr).toISOString()
: new Date().toISOString(),
time: parsedTime,
status: "failed",
});
if (ip !== "unknown") {
@@ -109,9 +126,7 @@ export async function collectLoginStats(client: Client): Promise<LoginStats> {
}
}
}
} catch (e) {
// Ignore errors
}
} catch (e) {}
return {
recentLogins: recentLogins.slice(0, 10),

View File

@@ -68,12 +68,7 @@ export async function collectNetworkMetrics(client: Client): Promise<{
txBytes: null,
});
}
} catch (e) {
statsLogger.debug("Failed to collect network interface stats", {
operation: "network_stats_failed",
error: e instanceof Error ? e.message : String(e),
});
}
} catch (e) {}
return { interfaces };
}

View File

@@ -33,11 +33,13 @@ export async function collectProcessesMetrics(client: Client): Promise<{
for (let i = 1; i < Math.min(psLines.length, 11); i++) {
const parts = psLines[i].split(/\s+/);
if (parts.length >= 11) {
const cpuVal = Number(parts[2]);
const memVal = Number(parts[3]);
topProcesses.push({
pid: parts[1],
user: parts[0],
cpu: parts[2],
mem: parts[3],
cpu: Number.isFinite(cpuVal) ? cpuVal.toString() : "0",
mem: Number.isFinite(memVal) ? memVal.toString() : "0",
command: parts.slice(10).join(" ").substring(0, 50),
});
}
@@ -46,14 +48,13 @@ export async function collectProcessesMetrics(client: Client): Promise<{
const procCount = await execCommand(client, "ps aux | wc -l");
const runningCount = await execCommand(client, "ps aux | grep -c ' R '");
totalProcesses = Number(procCount.stdout.trim()) - 1;
runningProcesses = Number(runningCount.stdout.trim());
} catch (e) {
statsLogger.debug("Failed to collect process stats", {
operation: "process_stats_failed",
error: e instanceof Error ? e.message : String(e),
});
}
const totalCount = Number(procCount.stdout.trim()) - 1;
totalProcesses = Number.isFinite(totalCount) ? totalCount : null;
const runningCount2 = Number(runningCount.stdout.trim());
runningProcesses = Number.isFinite(runningCount2) ? runningCount2 : null;
} catch (e) {}
return {
total: totalProcesses,

View File

@@ -23,10 +23,7 @@ export async function collectSystemMetrics(client: Client): Promise<{
kernel = kernelOut.stdout.trim() || null;
os = osOut.stdout.trim() || null;
} catch (e) {
statsLogger.debug("Failed to collect system info", {
operation: "system_info_failed",
error: e instanceof Error ? e.message : String(e),
});
// No error log
}
return {

View File

@@ -21,12 +21,7 @@ export async function collectUptimeMetrics(client: Client): Promise<{
uptimeFormatted = `${days}d ${hours}h ${minutes}m`;
}
}
} catch (e) {
statsLogger.debug("Failed to collect uptime", {
operation: "uptime_failed",
error: e instanceof Error ? e.message : String(e),
});
}
} catch (e) {}
return {
seconds: uptimeSeconds,