diff --git a/tauri/package-lock.json b/tauri/package-lock.json
new file mode 100644
index 00000000..30888778
--- /dev/null
+++ b/tauri/package-lock.json
@@ -0,0 +1,233 @@
+{
+ "name": "vibetunnel-tauri",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "vibetunnel-tauri",
+ "version": "1.0.0",
+ "license": "MIT",
+ "devDependencies": {
+ "@tauri-apps/cli": "^2.0.0-rc.18"
+ }
+ },
+ "node_modules/@tauri-apps/cli": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli/-/cli-2.5.0.tgz",
+ "integrity": "sha512-rAtHqG0Gh/IWLjN2zTf3nZqYqbo81oMbqop56rGTjrlWk9pTTAjkqOjSL9XQLIMZ3RbeVjveCqqCA0s8RnLdMg==",
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "bin": {
+ "tauri": "tauri.js"
+ },
+ "engines": {
+ "node": ">= 10"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/tauri"
+ },
+ "optionalDependencies": {
+ "@tauri-apps/cli-darwin-arm64": "2.5.0",
+ "@tauri-apps/cli-darwin-x64": "2.5.0",
+ "@tauri-apps/cli-linux-arm-gnueabihf": "2.5.0",
+ "@tauri-apps/cli-linux-arm64-gnu": "2.5.0",
+ "@tauri-apps/cli-linux-arm64-musl": "2.5.0",
+ "@tauri-apps/cli-linux-riscv64-gnu": "2.5.0",
+ "@tauri-apps/cli-linux-x64-gnu": "2.5.0",
+ "@tauri-apps/cli-linux-x64-musl": "2.5.0",
+ "@tauri-apps/cli-win32-arm64-msvc": "2.5.0",
+ "@tauri-apps/cli-win32-ia32-msvc": "2.5.0",
+ "@tauri-apps/cli-win32-x64-msvc": "2.5.0"
+ }
+ },
+ "node_modules/@tauri-apps/cli-darwin-arm64": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-darwin-arm64/-/cli-darwin-arm64-2.5.0.tgz",
+ "integrity": "sha512-VuVAeTFq86dfpoBDNYAdtQVLbP0+2EKCHIIhkaxjeoPARR0sLpFHz2zs0PcFU76e+KAaxtEtAJAXGNUc8E1PzQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-darwin-x64": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-darwin-x64/-/cli-darwin-x64-2.5.0.tgz",
+ "integrity": "sha512-hUF01sC06cZVa8+I0/VtsHOk9BbO75rd+YdtHJ48xTdcYaQ5QIwL4yZz9OR1AKBTaUYhBam8UX9Pvd5V2/4Dpw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-linux-arm-gnueabihf": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-linux-arm-gnueabihf/-/cli-linux-arm-gnueabihf-2.5.0.tgz",
+ "integrity": "sha512-LQKqttsK252LlqYyX8R02MinUsfFcy3+NZiJwHFgi5Y3+ZUIAED9cSxJkyNtuY5KMnR4RlpgWyLv4P6akN1xhg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-linux-arm64-gnu": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-linux-arm64-gnu/-/cli-linux-arm64-gnu-2.5.0.tgz",
+ "integrity": "sha512-mTQufsPcpdHg5RW0zypazMo4L55EfeE5snTzrPqbLX4yCK2qalN7+rnP8O8GT06xhp6ElSP/Ku1M2MR297SByQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-linux-arm64-musl": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.5.0.tgz",
+ "integrity": "sha512-rQO1HhRUQqyEaal5dUVOQruTRda/TD36s9kv1hTxZiFuSq3558lsTjAcUEnMAtBcBkps20sbyTJNMT0AwYIk8Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-linux-riscv64-gnu": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-linux-riscv64-gnu/-/cli-linux-riscv64-gnu-2.5.0.tgz",
+ "integrity": "sha512-7oS18FN46yDxyw1zX/AxhLAd7T3GrLj3Ai6s8hZKd9qFVzrAn36ESL7d3G05s8wEtsJf26qjXnVF4qleS3dYsA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-linux-x64-gnu": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-linux-x64-gnu/-/cli-linux-x64-gnu-2.5.0.tgz",
+ "integrity": "sha512-SG5sFNL7VMmDBdIg3nO3EzNRT306HsiEQ0N90ILe3ZABYAVoPDO/ttpCO37ApLInTzrq/DLN+gOlC/mgZvLw1w==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-linux-x64-musl": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-linux-x64-musl/-/cli-linux-x64-musl-2.5.0.tgz",
+ "integrity": "sha512-QXDM8zp/6v05PNWju5ELsVwF0VH1n6b5pk2E6W/jFbbiwz80Vs1lACl9pv5kEHkrxBj+aWU/03JzGuIj2g3SkQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-win32-arm64-msvc": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-win32-arm64-msvc/-/cli-win32-arm64-msvc-2.5.0.tgz",
+ "integrity": "sha512-pFSHFK6b+o9y4Un8w0gGLwVyFTZaC3P0kQ7umRt/BLDkzD5RnQ4vBM7CF8BCU5nkwmEBUCZd7Wt3TWZxe41o6Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-win32-ia32-msvc": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-win32-ia32-msvc/-/cli-win32-ia32-msvc-2.5.0.tgz",
+ "integrity": "sha512-EArv1IaRlogdLAQyGlKmEqZqm5RfHCUMhJoedWu7GtdbOMUfSAz6FMX2boE1PtEmNO4An+g188flLeVErrxEKg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@tauri-apps/cli-win32-x64-msvc": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@tauri-apps/cli-win32-x64-msvc/-/cli-win32-x64-msvc-2.5.0.tgz",
+ "integrity": "sha512-lj43EFYbnAta8pd9JnUq87o+xRUR0odz+4rixBtTUwUgdRdwQ2V9CzFtsMu6FQKpFQ6mujRK6P1IEwhL6ADRsQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 OR MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">= 10"
+ }
+ }
+ }
+}
diff --git a/tauri/public/server-console.html b/tauri/public/server-console.html
new file mode 100644
index 00000000..6ee0dd06
--- /dev/null
+++ b/tauri/public/server-console.html
@@ -0,0 +1,634 @@
+
+
+
+
+
+ Server Console - VibeTunnel
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Connecting to server...
+
+
+
+
No logs yet
+
Server logs will appear here when activity occurs
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tauri/public/settings.html b/tauri/public/settings.html
new file mode 100644
index 00000000..806c85dc
--- /dev/null
+++ b/tauri/public/settings.html
@@ -0,0 +1,1286 @@
+
+
+
+
+
+ VibeTunnel Settings
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Updates
+
+
+
+
+
+
+
+
+
System Permissions
+
+ VibeTunnel requires certain permissions to function properly. Grant these permissions to enable all features.
+
+
+
+
+
+
+
+
+
+
+
Dashboard Security
+
+
+
+
+
+
+
+
+
Server Configuration
+
+
+
+
+
+
+
Remote Access
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Terminal Settings
+
+
+
+
+
+
CLI Tool
+
+ The vt command lets you quickly create terminal sessions from your existing terminal.
+
+
+
+
+
+
Session Management
+
+
+
+
+
+
+
+
+
+
+
Server Status
+
+
Server: Running
+
Port: 4020
+
Mode: Rust
+
Sessions: 0
+
+
+
+
+
+
+
+
+
API Testing
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Server Console
+
+
+
+
+
+
Server started on port 4020
+
Health check: OK
+
+
+
+
+
+
Developer Tools
+
+
+
+
+
+
+
+
+
+
+
+

+
VibeTunnel
+
Version 1.0.0
+
+
+
+
+
+
+
+
+ © 2024 VibeTunnel. All rights reserved.
+ Built with ❤️ for developers
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tauri/src-tauri/Cargo.toml b/tauri/src-tauri/Cargo.toml
index 5955fee2..6308cd85 100644
--- a/tauri/src-tauri/Cargo.toml
+++ b/tauri/src-tauri/Cargo.toml
@@ -82,13 +82,20 @@ reqwest = { version = "0.12", features = ["json"] }
base64 = "0.22"
sha2 = "0.10"
+# Debug features
+num_cpus = "1"
+
+# Network utilities
+[target.'cfg(unix)'.dependencies]
+nix = { version = "0.27", features = ["net"] }
+
+[target.'cfg(windows)'.dependencies]
+ipconfig = "0.3"
+windows = { version = "0.58", features = ["Win32_Foundation", "Win32_Security", "Win32_System_Threading", "Win32_UI_WindowsAndMessaging"] }
+
[target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies]
tauri-plugin-single-instance = "2.0.1"
-# Platform-specific dependencies
-[target.'cfg(windows)'.dependencies]
-windows = { version = "0.58", features = ["Win32_Foundation", "Win32_Security", "Win32_System_Threading", "Win32_UI_WindowsAndMessaging"] }
-
[profile.release]
panic = "abort"
codegen-units = 1
diff --git a/tauri/src-tauri/public/icon.png b/tauri/src-tauri/public/icon.png
new file mode 100644
index 00000000..1a5ee251
Binary files /dev/null and b/tauri/src-tauri/public/icon.png differ
diff --git a/tauri/src-tauri/public/server-console.html b/tauri/src-tauri/public/server-console.html
new file mode 100644
index 00000000..6ee0dd06
--- /dev/null
+++ b/tauri/src-tauri/public/server-console.html
@@ -0,0 +1,634 @@
+
+
+
+
+
+ Server Console - VibeTunnel
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Connecting to server...
+
+
+
+
No logs yet
+
Server logs will appear here when activity occurs
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tauri/src-tauri/public/settings.html b/tauri/src-tauri/public/settings.html
new file mode 100644
index 00000000..806c85dc
--- /dev/null
+++ b/tauri/src-tauri/public/settings.html
@@ -0,0 +1,1286 @@
+
+
+
+
+
+ VibeTunnel Settings
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Updates
+
+
+
+
+
+
+
+
+
System Permissions
+
+ VibeTunnel requires certain permissions to function properly. Grant these permissions to enable all features.
+
+
+
+
+
+
+
+
+
+
+
Dashboard Security
+
+
+
+
+
+
+
+
+
Server Configuration
+
+
+
+
+
+
+
Remote Access
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Terminal Settings
+
+
+
+
+
+
CLI Tool
+
+ The vt command lets you quickly create terminal sessions from your existing terminal.
+
+
+
+
+
+
Session Management
+
+
+
+
+
+
+
+
+
+
+
Server Status
+
+
Server: Running
+
Port: 4020
+
Mode: Rust
+
Sessions: 0
+
+
+
+
+
+
+
+
+
API Testing
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Server Console
+
+
+
+
+
+
Server started on port 4020
+
Health check: OK
+
+
+
+
+
+
Developer Tools
+
+
+
+
+
+
+
+
+
+
+
+

+
VibeTunnel
+
Version 1.0.0
+
+
+
+
+
+
+
+
+ © 2024 VibeTunnel. All rights reserved.
+ Built with ❤️ for developers
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tauri/src-tauri/public/welcome.html b/tauri/src-tauri/public/welcome.html
new file mode 100644
index 00000000..9a3ec713
--- /dev/null
+++ b/tauri/src-tauri/public/welcome.html
@@ -0,0 +1,417 @@
+
+
+
+
+
+ Welcome to VibeTunnel
+
+
+
+
+
+
+
+

+
+
Welcome to VibeTunnel
+
Turn any browser into your terminal. Command your agents on the go.
+
+ You'll be quickly guided through the basics of VibeTunnel.
+ This screen can always be opened from the settings.
+
+
+
+
+
+
+

+
+
What VibeTunnel Does
+
A secure terminal server that runs on your machine
+
+
+
+
Access your terminal from any web browser
+
+
+
+
Create multiple isolated terminal sessions
+
+
+
+
Secure with password protection
+
+
+
+
Works with ngrok or Tailscale for remote access
+
+
+
+
+
+
+
+

+
+
Accessing Your Dashboard
+
+ To access your terminals from any device, create a tunnel from your device.
+ This can be done via ngrok in settings or Tailscale (recommended).
+
+
+
+
+
+
+
+
+
+
+

+
+
You're All Set!
+
VibeTunnel is now running in your system tray
+
+ Click the VibeTunnel icon in your system tray to access settings,
+ open the dashboard, or manage your terminal sessions.
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tauri/src-tauri/src/api_testing.rs b/tauri/src-tauri/src/api_testing.rs
new file mode 100644
index 00000000..d221e997
--- /dev/null
+++ b/tauri/src-tauri/src/api_testing.rs
@@ -0,0 +1,648 @@
+use serde::{Serialize, Deserialize};
+use std::sync::Arc;
+use tokio::sync::RwLock;
+use std::collections::HashMap;
+use chrono::{DateTime, Utc};
+use reqwest::Client;
+use std::time::Duration;
+
+/// API test method
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
+pub enum HttpMethod {
+ GET,
+ POST,
+ PUT,
+ PATCH,
+ DELETE,
+ HEAD,
+ OPTIONS,
+}
+
+impl HttpMethod {
+ pub fn as_str(&self) -> &str {
+ match self {
+ HttpMethod::GET => "GET",
+ HttpMethod::POST => "POST",
+ HttpMethod::PUT => "PUT",
+ HttpMethod::PATCH => "PATCH",
+ HttpMethod::DELETE => "DELETE",
+ HttpMethod::HEAD => "HEAD",
+ HttpMethod::OPTIONS => "OPTIONS",
+ }
+ }
+}
+
+/// API test assertion type
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub enum AssertionType {
+ StatusCode(u16),
+ StatusRange { min: u16, max: u16 },
+ ResponseTime { max_ms: u64 },
+ HeaderExists(String),
+ HeaderEquals { key: String, value: String },
+ JsonPath { path: String, expected: serde_json::Value },
+ BodyContains(String),
+ BodyMatches(String), // Regex
+ ContentType(String),
+}
+
+/// API test case
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct APITest {
+ pub id: String,
+ pub name: String,
+ pub description: Option,
+ pub group: Option,
+ pub endpoint_url: String,
+ pub method: HttpMethod,
+ pub headers: HashMap,
+ pub query_params: HashMap,
+ pub body: Option,
+ pub auth: Option,
+ pub assertions: Vec,
+ pub timeout_ms: u64,
+ pub retry_count: u32,
+ pub delay_ms: Option,
+ pub save_response: bool,
+}
+
+/// API test body
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum APITestBody {
+ Json(serde_json::Value),
+ Form(HashMap),
+ Text(String),
+ Binary(Vec),
+}
+
+/// API test authentication
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum APITestAuth {
+ Basic { username: String, password: String },
+ Bearer(String),
+ ApiKey { key: String, value: String, in_header: bool },
+ Custom(HashMap),
+}
+
+/// API test result
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct APITestResult {
+ pub test_id: String,
+ pub test_name: String,
+ pub success: bool,
+ pub timestamp: DateTime,
+ pub duration_ms: u64,
+ pub status_code: Option,
+ pub response_headers: HashMap,
+ pub response_body: Option,
+ pub assertion_results: Vec,
+ pub error: Option,
+ pub retries_used: u32,
+}
+
+/// Assertion result
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AssertionResult {
+ pub assertion: AssertionType,
+ pub passed: bool,
+ pub actual_value: Option,
+ pub error_message: Option,
+}
+
+/// API test suite
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct APITestSuite {
+ pub id: String,
+ pub name: String,
+ pub description: Option,
+ pub base_url: Option,
+ pub default_headers: HashMap,
+ pub default_auth: Option,
+ pub tests: Vec,
+ pub setup_tests: Vec,
+ pub teardown_tests: Vec,
+ pub variables: HashMap,
+}
+
+/// API test collection
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct APITestCollection {
+ pub id: String,
+ pub name: String,
+ pub suites: Vec,
+ pub global_variables: HashMap,
+}
+
+/// API test runner configuration
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct APITestRunnerConfig {
+ pub parallel_execution: bool,
+ pub max_parallel_tests: usize,
+ pub stop_on_failure: bool,
+ pub capture_responses: bool,
+ pub follow_redirects: bool,
+ pub verify_ssl: bool,
+ pub proxy: Option,
+ pub environment_variables: HashMap,
+}
+
+impl Default for APITestRunnerConfig {
+ fn default() -> Self {
+ Self {
+ parallel_execution: false,
+ max_parallel_tests: 5,
+ stop_on_failure: false,
+ capture_responses: true,
+ follow_redirects: true,
+ verify_ssl: true,
+ proxy: None,
+ environment_variables: HashMap::new(),
+ }
+ }
+}
+
+/// API test history entry
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct APITestHistoryEntry {
+ pub run_id: String,
+ pub timestamp: DateTime,
+ pub suite_name: String,
+ pub total_tests: usize,
+ pub passed_tests: usize,
+ pub failed_tests: usize,
+ pub total_duration_ms: u64,
+ pub results: Vec,
+}
+
+/// API testing manager
+pub struct APITestingManager {
+ client: Arc,
+ config: Arc>,
+ test_suites: Arc>>,
+ test_history: Arc>>,
+ running_tests: Arc>>,
+ shared_variables: Arc>>,
+ notification_manager: Option>,
+}
+
+impl APITestingManager {
+ /// Create a new API testing manager
+ pub fn new() -> Self {
+ let client = Client::builder()
+ .timeout(Duration::from_secs(30))
+ .build()
+ .unwrap();
+
+ Self {
+ client: Arc::new(client),
+ config: Arc::new(RwLock::new(APITestRunnerConfig::default())),
+ test_suites: Arc::new(RwLock::new(HashMap::new())),
+ test_history: Arc::new(RwLock::new(Vec::new())),
+ running_tests: Arc::new(RwLock::new(HashMap::new())),
+ shared_variables: Arc::new(RwLock::new(HashMap::new())),
+ notification_manager: None,
+ }
+ }
+
+ /// Set the notification manager
+ pub fn set_notification_manager(&mut self, notification_manager: Arc) {
+ self.notification_manager = Some(notification_manager);
+ }
+
+ /// Get configuration
+ pub async fn get_config(&self) -> APITestRunnerConfig {
+ self.config.read().await.clone()
+ }
+
+ /// Update configuration
+ pub async fn update_config(&self, config: APITestRunnerConfig) {
+ *self.config.write().await = config;
+ }
+
+ /// Add test suite
+ pub async fn add_test_suite(&self, suite: APITestSuite) {
+ self.test_suites.write().await.insert(suite.id.clone(), suite);
+ }
+
+ /// Get test suite
+ pub async fn get_test_suite(&self, suite_id: &str) -> Option {
+ self.test_suites.read().await.get(suite_id).cloned()
+ }
+
+ /// List test suites
+ pub async fn list_test_suites(&self) -> Vec {
+ self.test_suites.read().await.values().cloned().collect()
+ }
+
+ /// Run single test
+ pub async fn run_test(&self, test: &APITest, variables: &HashMap) -> APITestResult {
+ let start_time = std::time::Instant::now();
+ let mut result = APITestResult {
+ test_id: test.id.clone(),
+ test_name: test.name.clone(),
+ success: false,
+ timestamp: Utc::now(),
+ duration_ms: 0,
+ status_code: None,
+ response_headers: HashMap::new(),
+ response_body: None,
+ assertion_results: Vec::new(),
+ error: None,
+ retries_used: 0,
+ };
+
+ // Replace variables in URL
+ let url = self.replace_variables(&test.endpoint_url, variables);
+
+ // Run test with retries
+ let mut last_error = None;
+ for retry in 0..=test.retry_count {
+ if retry > 0 {
+ // Delay between retries
+ if let Some(delay) = test.delay_ms {
+ tokio::time::sleep(Duration::from_millis(delay)).await;
+ }
+ }
+
+ match self.execute_request(&test, &url, variables).await {
+ Ok((status, headers, body)) => {
+ result.status_code = Some(status);
+ result.response_headers = headers;
+ if test.save_response {
+ result.response_body = Some(body.clone());
+ }
+ result.retries_used = retry;
+
+ // Run assertions
+ result.assertion_results = self.run_assertions(&test.assertions, status, &result.response_headers, &body).await;
+ result.success = result.assertion_results.iter().all(|a| a.passed);
+
+ break;
+ }
+ Err(e) => {
+ last_error = Some(e);
+ }
+ }
+ }
+
+ if let Some(error) = last_error {
+ result.error = Some(error);
+ }
+
+ result.duration_ms = start_time.elapsed().as_millis() as u64;
+ result
+ }
+
+ /// Run test suite
+ pub async fn run_test_suite(&self, suite_id: &str) -> Option {
+ let suite = self.get_test_suite(suite_id).await?;
+ let run_id = uuid::Uuid::new_v4().to_string();
+ let start_time = std::time::Instant::now();
+
+ // Merge variables
+ let mut variables = self.shared_variables.read().await.clone();
+ variables.extend(suite.variables.clone());
+
+ let mut results = Vec::new();
+
+ // Run setup tests
+ for test in &suite.setup_tests {
+ let result = self.run_test(test, &variables).await;
+ if !result.success && self.config.read().await.stop_on_failure {
+ break;
+ }
+ results.push(result);
+ }
+
+ // Run main tests
+ let config = self.config.read().await;
+ if config.parallel_execution {
+ // Run tests in parallel
+ let mut tasks = Vec::new();
+ for test in &suite.tests {
+ let test = test.clone();
+ let vars = variables.clone();
+ let manager = self.clone_for_parallel();
+
+ tasks.push(tokio::spawn(async move {
+ manager.run_test(&test, &vars).await
+ }));
+ }
+
+ for task in tasks {
+ if let Ok(result) = task.await {
+ results.push(result);
+ }
+ }
+ } else {
+ // Run tests sequentially
+ for test in &suite.tests {
+ let result = self.run_test(test, &variables).await;
+ if !result.success && config.stop_on_failure {
+ break;
+ }
+ results.push(result);
+ }
+ }
+
+ // Run teardown tests
+ for test in &suite.teardown_tests {
+ let result = self.run_test(test, &variables).await;
+ results.push(result);
+ }
+
+ let total_duration = start_time.elapsed().as_millis() as u64;
+ let passed = results.iter().filter(|r| r.success).count();
+ let failed = results.len() - passed;
+
+ let history_entry = APITestHistoryEntry {
+ run_id,
+ timestamp: Utc::now(),
+ suite_name: suite.name,
+ total_tests: results.len(),
+ passed_tests: passed,
+ failed_tests: failed,
+ total_duration_ms: total_duration,
+ results,
+ };
+
+ // Store in history
+ self.test_history.write().await.push(history_entry.clone());
+
+ // Send notification
+ if let Some(notification_manager) = &self.notification_manager {
+ let message = format!(
+ "Test suite completed: {} passed, {} failed",
+ passed, failed
+ );
+ let _ = notification_manager.notify_success("API Tests", &message).await;
+ }
+
+ Some(history_entry)
+ }
+
+ /// Get test history
+ pub async fn get_test_history(&self, limit: Option) -> Vec {
+ let history = self.test_history.read().await;
+ match limit {
+ Some(n) => history.iter().rev().take(n).cloned().collect(),
+ None => history.clone(),
+ }
+ }
+
+ /// Clear test history
+ pub async fn clear_test_history(&self) {
+ self.test_history.write().await.clear();
+ }
+
+ /// Import Postman collection
+ pub async fn import_postman_collection(&self, _json_data: &str) -> Result {
+ // TODO: Implement Postman collection import
+ Err("Postman import not yet implemented".to_string())
+ }
+
+ /// Export test suite
+ pub async fn export_test_suite(&self, suite_id: &str) -> Result {
+ let suite = self.get_test_suite(suite_id).await
+ .ok_or_else(|| "Test suite not found".to_string())?;
+
+ serde_json::to_string_pretty(&suite)
+ .map_err(|e| format!("Failed to serialize test suite: {}", e))
+ }
+
+ // Helper methods
+ async fn execute_request(
+ &self,
+ test: &APITest,
+ url: &str,
+ variables: &HashMap,
+ ) -> Result<(u16, HashMap, String), String> {
+ let config = self.config.read().await;
+ let client = Client::builder()
+ .timeout(Duration::from_millis(test.timeout_ms))
+ .redirect(if config.follow_redirects {
+ reqwest::redirect::Policy::default()
+ } else {
+ reqwest::redirect::Policy::none()
+ })
+ .danger_accept_invalid_certs(!config.verify_ssl)
+ .build()
+ .map_err(|e| e.to_string())?;
+
+ let mut request = match test.method {
+ HttpMethod::GET => client.get(url),
+ HttpMethod::POST => client.post(url),
+ HttpMethod::PUT => client.put(url),
+ HttpMethod::PATCH => client.patch(url),
+ HttpMethod::DELETE => client.delete(url),
+ HttpMethod::HEAD => client.head(url),
+ HttpMethod::OPTIONS => client.request(reqwest::Method::OPTIONS, url),
+ };
+
+ // Add headers
+ for (key, value) in &test.headers {
+ let value = self.replace_variables(value, variables);
+ request = request.header(key, value);
+ }
+
+ // Add query params
+ for (key, value) in &test.query_params {
+ let value = self.replace_variables(value, variables);
+ request = request.query(&[(key, value)]);
+ }
+
+ // Add auth
+ if let Some(auth) = &test.auth {
+ request = self.apply_auth(request, auth, variables);
+ }
+
+ // Add body
+ if let Some(body) = &test.body {
+ request = match body {
+ APITestBody::Json(json) => request.json(json),
+ APITestBody::Form(form) => request.form(form),
+ APITestBody::Text(text) => request.body(text.clone()),
+ APITestBody::Binary(bytes) => request.body(bytes.clone()),
+ };
+ }
+
+ // Execute request
+ let response = request.send().await.map_err(|e| e.to_string())?;
+ let status = response.status().as_u16();
+
+ let mut headers = HashMap::new();
+ for (key, value) in response.headers() {
+ if let Ok(value_str) = value.to_str() {
+ headers.insert(key.to_string(), value_str.to_string());
+ }
+ }
+
+ let body = response.text().await.unwrap_or_default();
+
+ Ok((status, headers, body))
+ }
+
+ async fn run_assertions(
+ &self,
+ assertions: &[AssertionType],
+ status: u16,
+ headers: &HashMap,
+ body: &str,
+ ) -> Vec {
+ let mut results = Vec::new();
+
+ for assertion in assertions {
+ let result = match assertion {
+ AssertionType::StatusCode(expected) => {
+ AssertionResult {
+ assertion: assertion.clone(),
+ passed: status == *expected,
+ actual_value: Some(status.to_string()),
+ error_message: if status != *expected {
+ Some(format!("Expected status {}, got {}", expected, status))
+ } else {
+ None
+ },
+ }
+ }
+ AssertionType::StatusRange { min, max } => {
+ AssertionResult {
+ assertion: assertion.clone(),
+ passed: status >= *min && status <= *max,
+ actual_value: Some(status.to_string()),
+ error_message: if status < *min || status > *max {
+ Some(format!("Expected status between {} and {}, got {}", min, max, status))
+ } else {
+ None
+ },
+ }
+ }
+ AssertionType::HeaderExists(key) => {
+ AssertionResult {
+ assertion: assertion.clone(),
+ passed: headers.contains_key(key),
+ actual_value: None,
+ error_message: if !headers.contains_key(key) {
+ Some(format!("Header '{}' not found", key))
+ } else {
+ None
+ },
+ }
+ }
+ AssertionType::HeaderEquals { key, value } => {
+ let actual = headers.get(key);
+ AssertionResult {
+ assertion: assertion.clone(),
+ passed: actual == Some(value),
+ actual_value: actual.cloned(),
+ error_message: if actual != Some(value) {
+ Some(format!("Header '{}' expected '{}', got '{:?}'", key, value, actual))
+ } else {
+ None
+ },
+ }
+ }
+ AssertionType::BodyContains(text) => {
+ AssertionResult {
+ assertion: assertion.clone(),
+ passed: body.contains(text),
+ actual_value: None,
+ error_message: if !body.contains(text) {
+ Some(format!("Body does not contain '{}'", text))
+ } else {
+ None
+ },
+ }
+ }
+ AssertionType::JsonPath { path: _, expected: _ } => {
+ // TODO: Implement JSON path assertion
+ AssertionResult {
+ assertion: assertion.clone(),
+ passed: false,
+ actual_value: None,
+ error_message: Some("JSON path assertions not yet implemented".to_string()),
+ }
+ }
+ _ => {
+ AssertionResult {
+ assertion: assertion.clone(),
+ passed: false,
+ actual_value: None,
+ error_message: Some("Assertion type not implemented".to_string()),
+ }
+ }
+ };
+ results.push(result);
+ }
+
+ results
+ }
+
+ fn replace_variables(&self, text: &str, variables: &HashMap) -> String {
+ let mut result = text.to_string();
+ for (key, value) in variables {
+ result = result.replace(&format!("{{{{{}}}}}", key), value);
+ }
+ result
+ }
+
+ fn apply_auth(
+ &self,
+ request: reqwest::RequestBuilder,
+ auth: &APITestAuth,
+ variables: &HashMap,
+ ) -> reqwest::RequestBuilder {
+ match auth {
+ APITestAuth::Basic { username, password } => {
+ let username = self.replace_variables(username, variables);
+ let password = self.replace_variables(password, variables);
+ request.basic_auth(username, Some(password))
+ }
+ APITestAuth::Bearer(token) => {
+ let token = self.replace_variables(token, variables);
+ request.bearer_auth(token)
+ }
+ APITestAuth::ApiKey { key, value, in_header } => {
+ let key = self.replace_variables(key, variables);
+ let value = self.replace_variables(value, variables);
+ if *in_header {
+ request.header(key, value)
+ } else {
+ request.query(&[(key, value)])
+ }
+ }
+ APITestAuth::Custom(headers) => {
+ let mut req = request;
+ for (key, value) in headers {
+ let value = self.replace_variables(value, variables);
+ req = req.header(key, value);
+ }
+ req
+ }
+ }
+ }
+
+ fn clone_for_parallel(&self) -> Self {
+ Self {
+ client: self.client.clone(),
+ config: self.config.clone(),
+ test_suites: self.test_suites.clone(),
+ test_history: self.test_history.clone(),
+ running_tests: self.running_tests.clone(),
+ shared_variables: self.shared_variables.clone(),
+ notification_manager: self.notification_manager.clone(),
+ }
+ }
+}
+
+/// API test statistics
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct APITestStatistics {
+ pub total_suites: usize,
+ pub total_tests: usize,
+ pub total_runs: usize,
+ pub success_rate: f64,
+ pub average_duration_ms: f64,
+ pub most_failed_tests: Vec<(String, usize)>,
+ pub slowest_tests: Vec<(String, u64)>,
+}
\ No newline at end of file
diff --git a/tauri/src-tauri/src/app_mover.rs b/tauri/src-tauri/src/app_mover.rs
new file mode 100644
index 00000000..3444ef1f
--- /dev/null
+++ b/tauri/src-tauri/src/app_mover.rs
@@ -0,0 +1,172 @@
+use tauri::{AppHandle, Manager};
+use std::path::PathBuf;
+
+/// Check if the app should be moved to Applications folder
+/// This is a macOS-specific feature
+#[cfg(target_os = "macos")]
+pub async fn check_and_prompt_move(app_handle: AppHandle) -> Result<(), String> {
+ use std::process::Command;
+
+ // Get current app bundle path
+ let bundle_path = get_app_bundle_path()?;
+
+ // Check if already in Applications folder
+ if is_in_applications_folder(&bundle_path) {
+ return Ok(());
+ }
+
+ // Check if we've already asked this question
+ let settings = crate::settings::Settings::load().unwrap_or_default();
+ if let Some(asked) = settings.general.show_welcome_on_startup {
+ if !asked {
+ // User has already been asked, don't ask again
+ return Ok(());
+ }
+ }
+
+ // Show dialog asking if user wants to move to Applications
+ let response = tauri::api::dialog::blocking::ask(
+ Some(&app_handle.get_webview_window("main").unwrap()),
+ "Move to Applications Folder?",
+ "VibeTunnel works best when run from the Applications folder. Would you like to move it there?"
+ );
+
+ if response {
+ move_to_applications_folder(bundle_path)?;
+
+ // Restart the app from the new location
+ restart_from_applications()?;
+ }
+
+ // Update settings to not ask again
+ let mut settings = crate::settings::Settings::load().unwrap_or_default();
+ settings.general.show_welcome_on_startup = Some(false);
+ settings.save().ok();
+
+ Ok(())
+}
+
+#[cfg(not(target_os = "macos"))]
+pub async fn check_and_prompt_move(_app_handle: AppHandle) -> Result<(), String> {
+ // Not applicable on other platforms
+ Ok(())
+}
+
+#[cfg(target_os = "macos")]
+fn get_app_bundle_path() -> Result {
+ use std::env;
+
+ // Get the executable path
+ let exe_path = env::current_exe()
+ .map_err(|e| format!("Failed to get executable path: {}", e))?;
+
+ // Navigate up to the .app bundle
+ // Typical structure: /path/to/VibeTunnel.app/Contents/MacOS/VibeTunnel
+ let mut bundle_path = exe_path;
+
+ // Go up three levels to reach the .app bundle
+ for _ in 0..3 {
+ bundle_path = bundle_path.parent()
+ .ok_or("Failed to find app bundle")?
+ .to_path_buf();
+ }
+
+ // Verify this is an .app bundle
+ if !bundle_path.to_string_lossy().ends_with(".app") {
+ return Err("Not running from an app bundle".to_string());
+ }
+
+ Ok(bundle_path)
+}
+
+#[cfg(target_os = "macos")]
+fn is_in_applications_folder(bundle_path: &PathBuf) -> bool {
+ let path_str = bundle_path.to_string_lossy();
+ path_str.contains("/Applications/") || path_str.contains("/System/Applications/")
+}
+
+#[cfg(target_os = "macos")]
+fn move_to_applications_folder(bundle_path: PathBuf) -> Result<(), String> {
+ use std::process::Command;
+ use std::fs;
+
+ let app_name = bundle_path.file_name()
+ .ok_or("Failed to get app name")?
+ .to_string_lossy();
+
+ let dest_path = PathBuf::from("/Applications").join(&app_name);
+
+ // Check if destination already exists
+ if dest_path.exists() {
+ // Ask user if they want to replace
+ let response = tauri::api::dialog::blocking::ask(
+ None,
+ "Replace Existing App?",
+ "VibeTunnel already exists in the Applications folder. Do you want to replace it?"
+ );
+
+ if !response {
+ return Err("User cancelled move operation".to_string());
+ }
+
+ // Remove existing app
+ fs::remove_dir_all(&dest_path)
+ .map_err(|e| format!("Failed to remove existing app: {}", e))?;
+ }
+
+ // Use AppleScript to move the app with proper permissions
+ let script = format!(
+ r#"tell application "Finder"
+ move (POSIX file "{}") to (POSIX file "/Applications/") with replacing
+ end tell"#,
+ bundle_path.to_string_lossy()
+ );
+
+ let output = Command::new("osascript")
+ .arg("-e")
+ .arg(script)
+ .output()
+ .map_err(|e| format!("Failed to execute move command: {}", e))?;
+
+ if !output.status.success() {
+ let error = String::from_utf8_lossy(&output.stderr);
+ return Err(format!("Failed to move app: {}", error));
+ }
+
+ Ok(())
+}
+
+#[cfg(target_os = "macos")]
+fn restart_from_applications() -> Result<(), String> {
+ use std::process::Command;
+
+ // Launch the app from the Applications folder
+ let output = Command::new("open")
+ .arg("-n")
+ .arg("/Applications/VibeTunnel.app")
+ .spawn()
+ .map_err(|e| format!("Failed to restart app: {}", e))?;
+
+ // Exit the current instance
+ std::process::exit(0);
+}
+
+#[tauri::command]
+pub async fn prompt_move_to_applications(app_handle: AppHandle) -> Result<(), String> {
+ check_and_prompt_move(app_handle).await
+}
+
+#[tauri::command]
+pub async fn is_in_applications_folder() -> Result {
+ #[cfg(target_os = "macos")]
+ {
+ let bundle_path = get_app_bundle_path()?;
+ Ok(is_in_applications_folder(&bundle_path))
+ }
+
+ #[cfg(not(target_os = "macos"))]
+ {
+ // Always return true on non-macOS platforms
+ Ok(true)
+ }
+}
\ No newline at end of file
diff --git a/tauri/src-tauri/src/auth_cache.rs b/tauri/src-tauri/src/auth_cache.rs
new file mode 100644
index 00000000..8f25be25
--- /dev/null
+++ b/tauri/src-tauri/src/auth_cache.rs
@@ -0,0 +1,483 @@
+use serde::{Serialize, Deserialize};
+use std::sync::Arc;
+use tokio::sync::RwLock;
+use std::collections::HashMap;
+use chrono::{DateTime, Utc, Duration};
+use sha2::{Sha256, Digest};
+
/// Authentication token type.
///
/// Distinguishes how a cached token is presented to the remote service
/// (e.g. `Authorization: Bearer …` vs. an API-key header). `Custom` covers
/// schemes not modeled explicitly.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum TokenType {
    Bearer,
    Basic,
    ApiKey,
    OAuth2,
    JWT,
    Custom,
}
+
+/// Authentication scope
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+pub struct AuthScope {
+ pub service: String,
+ pub resource: Option,
+ pub permissions: Vec,
+}
+
+/// Cached authentication token
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CachedToken {
+ pub token_type: TokenType,
+ pub token_value: String,
+ pub scope: AuthScope,
+ pub created_at: DateTime,
+ pub expires_at: Option>,
+ pub refresh_token: Option,
+ pub metadata: HashMap,
+}
+
+impl CachedToken {
+ /// Check if token is expired
+ pub fn is_expired(&self) -> bool {
+ if let Some(expires_at) = self.expires_at {
+ Utc::now() >= expires_at
+ } else {
+ false
+ }
+ }
+
+ /// Check if token needs refresh (expires within threshold)
+ pub fn needs_refresh(&self, threshold_seconds: i64) -> bool {
+ if let Some(expires_at) = self.expires_at {
+ let refresh_time = expires_at - Duration::seconds(threshold_seconds);
+ Utc::now() >= refresh_time
+ } else {
+ false
+ }
+ }
+
+ /// Get remaining lifetime in seconds
+ pub fn remaining_lifetime_seconds(&self) -> Option {
+ self.expires_at.map(|expires_at| {
+ let duration = expires_at - Utc::now();
+ duration.num_seconds().max(0)
+ })
+ }
+}
+
+/// Authentication credential
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthCredential {
+ pub credential_type: String,
+ pub username: Option,
+ pub password_hash: Option, // Store hashed password
+ pub api_key: Option,
+ pub client_id: Option,
+ pub client_secret: Option,
+ pub metadata: HashMap,
+}
+
+/// Authentication cache entry
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthCacheEntry {
+ pub key: String,
+ pub tokens: Vec,
+ pub credential: Option,
+ pub last_accessed: DateTime,
+ pub access_count: u64,
+}
+
/// Authentication cache configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthCacheConfig {
    // Master switch: when false, store/get operations become no-ops.
    pub enabled: bool,
    // Maximum number of cache entries before LRU eviction kicks in.
    pub max_entries: usize,
    // Default token time-to-live, in seconds.
    pub default_ttl_seconds: u64,
    // Fire the refresh callback this many seconds before expiry.
    pub refresh_threshold_seconds: i64,
    // Whether to persist the cache to disk (see export/import).
    pub persist_to_disk: bool,
    pub encryption_enabled: bool,
    // Interval between background expired-token sweeps, in seconds.
    pub cleanup_interval_seconds: u64,
}
+
+impl Default for AuthCacheConfig {
+ fn default() -> Self {
+ Self {
+ enabled: true,
+ max_entries: 1000,
+ default_ttl_seconds: 3600, // 1 hour
+ refresh_threshold_seconds: 300, // 5 minutes
+ persist_to_disk: false,
+ encryption_enabled: true,
+ cleanup_interval_seconds: 600, // 10 minutes
+ }
+ }
+}
+
/// Authentication cache statistics (monotonic counters plus current sizes).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthCacheStats {
    // Current number of entries in the cache.
    pub total_entries: usize,
    // Cumulative count of tokens ever stored (not the live count).
    pub total_tokens: usize,
    // Cumulative count of tokens pruned because they expired.
    pub expired_tokens: usize,
    pub cache_hits: u64,
    pub cache_misses: u64,
    // Number of successful background token refreshes.
    pub refresh_count: u64,
    // Number of entries evicted due to the max_entries limit.
    pub eviction_count: u64,
}
+
+/// Token refresh callback
+pub type TokenRefreshCallback = Arc futures::future::BoxFuture<'static, Result> + Send + Sync>;
+
+/// Authentication cache manager
+pub struct AuthCacheManager {
+ config: Arc>,
+ cache: Arc>>,
+ stats: Arc>,
+ refresh_callbacks: Arc>>,
+ cleanup_handle: Arc>>>,
+ notification_manager: Option>,
+}
+
+impl AuthCacheManager {
+ /// Create a new authentication cache manager
+ pub fn new() -> Self {
+ let manager = Self {
+ config: Arc::new(RwLock::new(AuthCacheConfig::default())),
+ cache: Arc::new(RwLock::new(HashMap::new())),
+ stats: Arc::new(RwLock::new(AuthCacheStats {
+ total_entries: 0,
+ total_tokens: 0,
+ expired_tokens: 0,
+ cache_hits: 0,
+ cache_misses: 0,
+ refresh_count: 0,
+ eviction_count: 0,
+ })),
+ refresh_callbacks: Arc::new(RwLock::new(HashMap::new())),
+ cleanup_handle: Arc::new(RwLock::new(None)),
+ notification_manager: None,
+ };
+
+ // Start cleanup task
+ let cleanup_manager = manager.clone_for_cleanup();
+ tokio::spawn(async move {
+ cleanup_manager.start_cleanup_task().await;
+ });
+
+ manager
+ }
+
+ /// Set the notification manager
+ pub fn set_notification_manager(&mut self, notification_manager: Arc) {
+ self.notification_manager = Some(notification_manager);
+ }
+
+ /// Get configuration
+ pub async fn get_config(&self) -> AuthCacheConfig {
+ self.config.read().await.clone()
+ }
+
+ /// Update configuration
+ pub async fn update_config(&self, config: AuthCacheConfig) {
+ *self.config.write().await = config;
+ }
+
+ /// Store token in cache
+ pub async fn store_token(&self, key: &str, token: CachedToken) -> Result<(), String> {
+ let config = self.config.read().await;
+ if !config.enabled {
+ return Ok(());
+ }
+
+ let mut cache = self.cache.write().await;
+ let mut stats = self.stats.write().await;
+
+ // Get or create cache entry
+ let entry = cache.entry(key.to_string()).or_insert_with(|| {
+ stats.total_entries += 1;
+ AuthCacheEntry {
+ key: key.to_string(),
+ tokens: Vec::new(),
+ credential: None,
+ last_accessed: Utc::now(),
+ access_count: 0,
+ }
+ });
+
+ // Remove expired tokens
+ let expired_count = entry.tokens.iter().filter(|t| t.is_expired()).count();
+ stats.expired_tokens += expired_count;
+ entry.tokens.retain(|t| !t.is_expired());
+
+ // Add new token
+ entry.tokens.push(token);
+ stats.total_tokens += 1;
+ entry.last_accessed = Utc::now();
+
+ // Check cache size limit
+ if cache.len() > config.max_entries {
+ self.evict_oldest_entry(&mut cache, &mut stats);
+ }
+
+ Ok(())
+ }
+
+ /// Get token from cache
+ pub async fn get_token(&self, key: &str, scope: &AuthScope) -> Option {
+ let config = self.config.read().await;
+ if !config.enabled {
+ return None;
+ }
+
+ let mut cache = self.cache.write().await;
+ let mut stats = self.stats.write().await;
+
+ if let Some(entry) = cache.get_mut(key) {
+ entry.last_accessed = Utc::now();
+ entry.access_count += 1;
+
+ // Find matching token
+ for token in &entry.tokens {
+ if !token.is_expired() && self.token_matches_scope(token, scope) {
+ stats.cache_hits += 1;
+
+ // Check if needs refresh
+ if token.needs_refresh(config.refresh_threshold_seconds) {
+ // Trigger refresh in background
+ if let Some(refresh_callback) = self.refresh_callbacks.read().await.get(key) {
+ let token_clone = token.clone();
+ let callback = refresh_callback.clone();
+ let key_clone = key.to_string();
+ let manager = self.clone_for_refresh();
+
+ tokio::spawn(async move {
+ if let Ok(refreshed_token) = callback(token_clone).await {
+ let _ = manager.store_token(&key_clone, refreshed_token).await;
+ manager.stats.write().await.refresh_count += 1;
+ }
+ });
+ }
+ }
+
+ return Some(token.clone());
+ }
+ }
+ }
+
+ stats.cache_misses += 1;
+ None
+ }
+
+ /// Store credential in cache
+ pub async fn store_credential(&self, key: &str, credential: AuthCredential) -> Result<(), String> {
+ let config = self.config.read().await;
+ if !config.enabled {
+ return Ok(());
+ }
+
+ let mut cache = self.cache.write().await;
+ let mut stats = self.stats.write().await;
+
+ let entry = cache.entry(key.to_string()).or_insert_with(|| {
+ stats.total_entries += 1;
+ AuthCacheEntry {
+ key: key.to_string(),
+ tokens: Vec::new(),
+ credential: None,
+ last_accessed: Utc::now(),
+ access_count: 0,
+ }
+ });
+
+ entry.credential = Some(credential);
+ entry.last_accessed = Utc::now();
+
+ Ok(())
+ }
+
+ /// Get credential from cache
+ pub async fn get_credential(&self, key: &str) -> Option {
+ let config = self.config.read().await;
+ if !config.enabled {
+ return None;
+ }
+
+ let mut cache = self.cache.write().await;
+
+ if let Some(entry) = cache.get_mut(key) {
+ entry.last_accessed = Utc::now();
+ entry.access_count += 1;
+ return entry.credential.clone();
+ }
+
+ None
+ }
+
+ /// Register token refresh callback
+ pub async fn register_refresh_callback(&self, key: &str, callback: TokenRefreshCallback) {
+ self.refresh_callbacks.write().await.insert(key.to_string(), callback);
+ }
+
+ /// Clear specific cache entry
+ pub async fn clear_entry(&self, key: &str) {
+ let mut cache = self.cache.write().await;
+ if cache.remove(key).is_some() {
+ self.stats.write().await.total_entries = cache.len();
+ }
+ }
+
+ /// Clear all cache entries
+ pub async fn clear_all(&self) {
+ let mut cache = self.cache.write().await;
+ cache.clear();
+
+ let mut stats = self.stats.write().await;
+ stats.total_entries = 0;
+ stats.total_tokens = 0;
+ stats.expired_tokens = 0;
+ }
+
+ /// Get cache statistics
+ pub async fn get_stats(&self) -> AuthCacheStats {
+ self.stats.read().await.clone()
+ }
+
+ /// List all cache entries
+ pub async fn list_entries(&self) -> Vec<(String, DateTime, u64)> {
+ self.cache.read().await
+ .values()
+ .map(|entry| (entry.key.clone(), entry.last_accessed, entry.access_count))
+ .collect()
+ }
+
+ /// Export cache to JSON (for persistence)
+ pub async fn export_cache(&self) -> Result {
+ let cache = self.cache.read().await;
+ let entries: Vec<_> = cache.values().cloned().collect();
+
+ serde_json::to_string_pretty(&entries)
+ .map_err(|e| format!("Failed to serialize cache: {}", e))
+ }
+
+ /// Import cache from JSON
+ pub async fn import_cache(&self, json_data: &str) -> Result<(), String> {
+ let entries: Vec = serde_json::from_str(json_data)
+ .map_err(|e| format!("Failed to deserialize cache: {}", e))?;
+
+ let mut cache = self.cache.write().await;
+ let mut stats = self.stats.write().await;
+
+ for entry in entries {
+ cache.insert(entry.key.clone(), entry);
+ }
+
+ stats.total_entries = cache.len();
+ stats.total_tokens = cache.values()
+ .map(|e| e.tokens.len())
+ .sum();
+
+ Ok(())
+ }
+
+ /// Hash password for secure storage
+ pub fn hash_password(password: &str) -> String {
+ let mut hasher = Sha256::new();
+ hasher.update(password.as_bytes());
+ format!("{:x}", hasher.finalize())
+ }
+
+ // Helper methods
+ fn token_matches_scope(&self, token: &CachedToken, scope: &AuthScope) -> bool {
+ token.scope.service == scope.service &&
+ token.scope.resource == scope.resource &&
+ scope.permissions.iter().all(|p| token.scope.permissions.contains(p))
+ }
+
+ fn evict_oldest_entry(&self, cache: &mut HashMap, stats: &mut AuthCacheStats) {
+ if let Some((key, _)) = cache.iter()
+ .min_by_key(|(_, entry)| entry.last_accessed) {
+ let key = key.clone();
+ cache.remove(&key);
+ stats.eviction_count += 1;
+ stats.total_entries = cache.len();
+ }
+ }
+
+ async fn start_cleanup_task(&self) {
+ let config = self.config.read().await;
+ let cleanup_interval = Duration::seconds(config.cleanup_interval_seconds as i64);
+ drop(config);
+
+ loop {
+ tokio::time::sleep(cleanup_interval.to_std().unwrap()).await;
+
+ let config = self.config.read().await;
+ if !config.enabled {
+ continue;
+ }
+ drop(config);
+
+ // Clean up expired tokens
+ let mut cache = self.cache.write().await;
+ let mut stats = self.stats.write().await;
+ let mut total_expired = 0;
+
+ for entry in cache.values_mut() {
+ let expired_count = entry.tokens.iter().filter(|t| t.is_expired()).count();
+ total_expired += expired_count;
+ entry.tokens.retain(|t| !t.is_expired());
+ }
+
+ stats.expired_tokens += total_expired;
+ stats.total_tokens = cache.values()
+ .map(|e| e.tokens.len())
+ .sum();
+
+ // Remove empty entries
+ cache.retain(|_, entry| !entry.tokens.is_empty() || entry.credential.is_some());
+ stats.total_entries = cache.len();
+ }
+ }
+
+ fn clone_for_cleanup(&self) -> Self {
+ Self {
+ config: self.config.clone(),
+ cache: self.cache.clone(),
+ stats: self.stats.clone(),
+ refresh_callbacks: self.refresh_callbacks.clone(),
+ cleanup_handle: self.cleanup_handle.clone(),
+ notification_manager: self.notification_manager.clone(),
+ }
+ }
+
+ fn clone_for_refresh(&self) -> Self {
+ Self {
+ config: self.config.clone(),
+ cache: self.cache.clone(),
+ stats: self.stats.clone(),
+ refresh_callbacks: self.refresh_callbacks.clone(),
+ cleanup_handle: self.cleanup_handle.clone(),
+ notification_manager: self.notification_manager.clone(),
+ }
+ }
+}
+
/// Build a cache key of the form `service[:username][:resource]`.
///
/// Omitted components are simply skipped, so `("svc", None, Some("db"))`
/// yields `"svc:db"`.
pub fn create_cache_key(service: &str, username: Option<&str>, resource: Option<&str>) -> String {
    let mut key = String::from(service);
    for part in [username, resource].into_iter().flatten() {
        key.push(':');
        key.push_str(part);
    }
    key
}
+
+/// Authentication cache error
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthCacheError {
+ pub code: String,
+ pub message: String,
+ pub details: Option>,
+}
\ No newline at end of file
diff --git a/tauri/src-tauri/src/backend_manager.rs b/tauri/src-tauri/src/backend_manager.rs
new file mode 100644
index 00000000..d7ad669a
--- /dev/null
+++ b/tauri/src-tauri/src/backend_manager.rs
@@ -0,0 +1,523 @@
+use serde::{Serialize, Deserialize};
+use std::sync::Arc;
+use tokio::sync::RwLock;
+use std::collections::HashMap;
+use std::path::PathBuf;
+use chrono::{DateTime, Utc};
+use tokio::process::Command;
+
/// Backend type enumeration.
///
/// Identifies which server implementation backs the app; `Rust` is the
/// built-in backend, the others are external processes.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum BackendType {
    Rust,
    NodeJS,
    Python,
    Go,
    Custom,
}
+
+impl BackendType {
+ pub fn as_str(&self) -> &str {
+ match self {
+ BackendType::Rust => "rust",
+ BackendType::NodeJS => "nodejs",
+ BackendType::Python => "python",
+ BackendType::Go => "go",
+ BackendType::Custom => "custom",
+ }
+ }
+
+ pub fn from_str(s: &str) -> Self {
+ match s.to_lowercase().as_str() {
+ "rust" => BackendType::Rust,
+ "nodejs" | "node" => BackendType::NodeJS,
+ "python" => BackendType::Python,
+ "go" => BackendType::Go,
+ _ => BackendType::Custom,
+ }
+ }
+}
+
/// Backend status lifecycle: NotInstalled → Installing → Installed →
/// Starting → Running → Stopping → Stopped, with Error as a terminal
/// failure state.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum BackendStatus {
    NotInstalled,
    Installing,
    Installed,
    Starting,
    Running,
    Stopping,
    Stopped,
    Error,
}
+
+/// Backend configuration
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackendConfig {
+ pub backend_type: BackendType,
+ pub name: String,
+ pub version: String,
+ pub executable_path: Option,
+ pub working_directory: Option,
+ pub environment_variables: HashMap,
+ pub arguments: Vec,
+ pub port: Option,
+ pub features: BackendFeatures,
+ pub requirements: BackendRequirements,
+}
+
/// Backend features: capability flags describing what a backend supports.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackendFeatures {
    pub terminal_sessions: bool,
    pub file_browser: bool,
    pub port_forwarding: bool,
    pub authentication: bool,
    pub websocket_support: bool,
    pub rest_api: bool,
    pub graphql_api: bool,
    pub metrics: bool,
}
+
+/// Backend requirements
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackendRequirements {
+ pub runtime: Option,
+ pub runtime_version: Option,
+ pub dependencies: Vec,
+ pub system_packages: Vec,
+ pub min_memory_mb: Option,
+ pub min_disk_space_mb: Option,
+}
+
+/// Backend instance information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackendInstance {
+ pub id: String,
+ pub backend_type: BackendType,
+ pub status: BackendStatus,
+ pub pid: Option,
+ pub port: u16,
+ pub started_at: Option>,
+ pub last_health_check: Option>,
+ pub health_status: HealthStatus,
+ pub metrics: BackendMetrics,
+}
+
/// Health status reported by backend health checks.
/// `Unknown` is used before the first check or for non-running instances.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum HealthStatus {
    Healthy,
    Degraded,
    Unhealthy,
    Unknown,
}
+
+/// Backend metrics
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackendMetrics {
+ pub cpu_usage_percent: Option,
+ pub memory_usage_mb: Option,
+ pub request_count: u64,
+ pub error_count: u64,
+ pub average_response_time_ms: Option,
+ pub active_connections: u32,
+}
+
+/// Backend manager
+pub struct BackendManager {
+ configs: Arc>>,
+ instances: Arc>>,
+ active_backend: Arc>>,
+ notification_manager: Option>,
+}
+
+impl BackendManager {
+ /// Create a new backend manager
+ pub fn new() -> Self {
+ let manager = Self {
+ configs: Arc::new(RwLock::new(HashMap::new())),
+ instances: Arc::new(RwLock::new(HashMap::new())),
+ active_backend: Arc::new(RwLock::new(Some(BackendType::Rust))),
+ notification_manager: None,
+ };
+
+ // Initialize default backend configurations
+ tokio::spawn({
+ let configs = manager.configs.clone();
+ async move {
+ let default_configs = Self::initialize_default_configs();
+ *configs.write().await = default_configs;
+ }
+ });
+
+ manager
+ }
+
+ /// Set the notification manager
+ pub fn set_notification_manager(&mut self, notification_manager: Arc) {
+ self.notification_manager = Some(notification_manager);
+ }
+
+ /// Initialize default backend configurations
+ fn initialize_default_configs() -> HashMap {
+ let mut configs = HashMap::new();
+
+ // Rust backend (built-in)
+ configs.insert(BackendType::Rust, BackendConfig {
+ backend_type: BackendType::Rust,
+ name: "Rust (Built-in)".to_string(),
+ version: env!("CARGO_PKG_VERSION").to_string(),
+ executable_path: None,
+ working_directory: None,
+ environment_variables: HashMap::new(),
+ arguments: vec![],
+ port: Some(4020),
+ features: BackendFeatures {
+ terminal_sessions: true,
+ file_browser: true,
+ port_forwarding: true,
+ authentication: true,
+ websocket_support: true,
+ rest_api: true,
+ graphql_api: false,
+ metrics: true,
+ },
+ requirements: BackendRequirements {
+ runtime: None,
+ runtime_version: None,
+ dependencies: vec![],
+ system_packages: vec![],
+ min_memory_mb: Some(64),
+ min_disk_space_mb: Some(10),
+ },
+ });
+
+ // Node.js backend
+ configs.insert(BackendType::NodeJS, BackendConfig {
+ backend_type: BackendType::NodeJS,
+ name: "Node.js Server".to_string(),
+ version: "1.0.0".to_string(),
+ executable_path: Some(PathBuf::from("node")),
+ working_directory: None,
+ environment_variables: HashMap::new(),
+ arguments: vec!["server.js".to_string()],
+ port: Some(4021),
+ features: BackendFeatures {
+ terminal_sessions: true,
+ file_browser: true,
+ port_forwarding: false,
+ authentication: true,
+ websocket_support: true,
+ rest_api: true,
+ graphql_api: true,
+ metrics: false,
+ },
+ requirements: BackendRequirements {
+ runtime: Some("node".to_string()),
+ runtime_version: Some(">=16.0.0".to_string()),
+ dependencies: vec![
+ "express".to_string(),
+ "socket.io".to_string(),
+ "node-pty".to_string(),
+ ],
+ system_packages: vec![],
+ min_memory_mb: Some(128),
+ min_disk_space_mb: Some(50),
+ },
+ });
+
+ // Python backend
+ configs.insert(BackendType::Python, BackendConfig {
+ backend_type: BackendType::Python,
+ name: "Python Server".to_string(),
+ version: "1.0.0".to_string(),
+ executable_path: Some(PathBuf::from("python3")),
+ working_directory: None,
+ environment_variables: HashMap::new(),
+ arguments: vec!["-m".to_string(), "vibetunnel_server".to_string()],
+ port: Some(4022),
+ features: BackendFeatures {
+ terminal_sessions: true,
+ file_browser: true,
+ port_forwarding: false,
+ authentication: true,
+ websocket_support: true,
+ rest_api: true,
+ graphql_api: false,
+ metrics: true,
+ },
+ requirements: BackendRequirements {
+ runtime: Some("python3".to_string()),
+ runtime_version: Some(">=3.8".to_string()),
+ dependencies: vec![
+ "fastapi".to_string(),
+ "uvicorn".to_string(),
+ "websockets".to_string(),
+ "ptyprocess".to_string(),
+ ],
+ system_packages: vec![],
+ min_memory_mb: Some(96),
+ min_disk_space_mb: Some(30),
+ },
+ });
+
+ configs
+ }
+
+ /// Get available backends
+ pub async fn get_available_backends(&self) -> Vec {
+ self.configs.read().await.values().cloned().collect()
+ }
+
+ /// Get backend configuration
+ pub async fn get_backend_config(&self, backend_type: BackendType) -> Option {
+ self.configs.read().await.get(&backend_type).cloned()
+ }
+
+ /// Check if backend is installed
+ pub async fn is_backend_installed(&self, backend_type: BackendType) -> bool {
+ match backend_type {
+ BackendType::Rust => true, // Built-in
+ BackendType::NodeJS => self.check_nodejs_installed().await,
+ BackendType::Python => self.check_python_installed().await,
+ BackendType::Go => self.check_go_installed().await,
+ BackendType::Custom => false,
+ }
+ }
+
+ /// Install backend
+ pub async fn install_backend(&self, backend_type: BackendType) -> Result<(), String> {
+ match backend_type {
+ BackendType::Rust => Ok(()), // Already installed
+ BackendType::NodeJS => self.install_nodejs_backend().await,
+ BackendType::Python => self.install_python_backend().await,
+ BackendType::Go => Err("Go backend not yet implemented".to_string()),
+ BackendType::Custom => Err("Custom backend installation not supported".to_string()),
+ }
+ }
+
+ /// Start backend
+ pub async fn start_backend(&self, backend_type: BackendType) -> Result {
+ // Check if backend is installed
+ if !self.is_backend_installed(backend_type).await {
+ return Err(format!("{:?} backend is not installed", backend_type));
+ }
+
+ // Get backend configuration
+ let config = self.get_backend_config(backend_type).await
+ .ok_or_else(|| "Backend configuration not found".to_string())?;
+
+ // Generate instance ID
+ let instance_id = uuid::Uuid::new_v4().to_string();
+
+ // Create backend instance
+ let instance = BackendInstance {
+ id: instance_id.clone(),
+ backend_type,
+ status: BackendStatus::Starting,
+ pid: None,
+ port: config.port.unwrap_or(4020),
+ started_at: None,
+ last_health_check: None,
+ health_status: HealthStatus::Unknown,
+ metrics: BackendMetrics {
+ cpu_usage_percent: None,
+ memory_usage_mb: None,
+ request_count: 0,
+ error_count: 0,
+ average_response_time_ms: None,
+ active_connections: 0,
+ },
+ };
+
+ // Store instance
+ self.instances.write().await.insert(instance_id.clone(), instance);
+
+ // Start backend process
+ match backend_type {
+ BackendType::Rust => {
+ // Rust backend is handled internally
+ self.update_instance_status(&instance_id, BackendStatus::Running).await;
+ *self.active_backend.write().await = Some(BackendType::Rust);
+ Ok(instance_id)
+ }
+ _ => {
+ // Start external backend process
+ self.start_external_backend(&instance_id, config).await
+ }
+ }
+ }
+
+ /// Stop backend
+ pub async fn stop_backend(&self, instance_id: &str) -> Result<(), String> {
+ let instance = self.instances.read().await
+ .get(instance_id)
+ .cloned()
+ .ok_or_else(|| "Backend instance not found".to_string())?;
+
+ match instance.backend_type {
+ BackendType::Rust => {
+ // Rust backend is handled internally
+ self.update_instance_status(instance_id, BackendStatus::Stopped).await;
+ Ok(())
+ }
+ _ => {
+ // Stop external backend process
+ self.stop_external_backend(instance_id).await
+ }
+ }
+ }
+
+ /// Switch active backend
+ pub async fn switch_backend(&self, backend_type: BackendType) -> Result<(), String> {
+ // Stop current backend if different
+ let current_backend = *self.active_backend.read().await;
+ if let Some(current) = current_backend {
+ if current != backend_type {
+ // Find and stop current backend instances
+ let instance_id = {
+ let instances = self.instances.read().await;
+ instances.iter()
+ .find(|(_, instance)| instance.backend_type == current && instance.status == BackendStatus::Running)
+ .map(|(id, _)| id.clone())
+ };
+ if let Some(id) = instance_id {
+ self.stop_backend(&id).await?;
+ }
+ }
+ }
+
+ // Start new backend
+ self.start_backend(backend_type).await?;
+
+ // Update active backend
+ *self.active_backend.write().await = Some(backend_type);
+
+ // Notify about backend switch
+ if let Some(notification_manager) = &self.notification_manager {
+ let _ = notification_manager.notify_success(
+ "Backend Switched",
+ &format!("Switched to {:?} backend", backend_type)
+ ).await;
+ }
+
+ Ok(())
+ }
+
+ /// Get active backend
+ pub async fn get_active_backend(&self) -> Option {
+ *self.active_backend.read().await
+ }
+
+ /// Get backend instances
+ pub async fn get_backend_instances(&self) -> Vec {
+ self.instances.read().await.values().cloned().collect()
+ }
+
+ /// Get backend health
+ pub async fn check_backend_health(&self, instance_id: &str) -> Result {
+ let instance = self.instances.read().await
+ .get(instance_id)
+ .cloned()
+ .ok_or_else(|| "Backend instance not found".to_string())?;
+
+ if instance.status != BackendStatus::Running {
+ return Ok(HealthStatus::Unknown);
+ }
+
+ // Perform health check based on backend type
+ let health_status = match instance.backend_type {
+ BackendType::Rust => HealthStatus::Healthy, // Always healthy for built-in
+ _ => self.check_external_backend_health(&instance).await?,
+ };
+
+ // Update instance health status
+ if let Some(instance) = self.instances.write().await.get_mut(instance_id) {
+ instance.health_status = health_status;
+ instance.last_health_check = Some(Utc::now());
+ }
+
+ Ok(health_status)
+ }
+
+ // Helper methods
+ async fn check_nodejs_installed(&self) -> bool {
+ Command::new("node")
+ .arg("--version")
+ .output()
+ .await
+ .map(|output| output.status.success())
+ .unwrap_or(false)
+ }
+
+ async fn check_python_installed(&self) -> bool {
+ Command::new("python3")
+ .arg("--version")
+ .output()
+ .await
+ .map(|output| output.status.success())
+ .unwrap_or(false)
+ }
+
+ async fn check_go_installed(&self) -> bool {
+ Command::new("go")
+ .arg("version")
+ .output()
+ .await
+ .map(|output| output.status.success())
+ .unwrap_or(false)
+ }
+
+ async fn install_nodejs_backend(&self) -> Result<(), String> {
+ // TODO: Implement Node.js backend installation
+ // This would involve:
+ // 1. Creating package.json
+ // 2. Installing dependencies
+ // 3. Copying server files
+ Err("Node.js backend installation not yet implemented".to_string())
+ }
+
+ async fn install_python_backend(&self) -> Result<(), String> {
+ // TODO: Implement Python backend installation
+ // This would involve:
+ // 1. Creating virtual environment
+ // 2. Installing pip dependencies
+ // 3. Copying server files
+ Err("Python backend installation not yet implemented".to_string())
+ }
+
+ async fn start_external_backend(&self, _instance_id: &str, _config: BackendConfig) -> Result {
+ // TODO: Implement external backend startup
+ Err("External backend startup not yet implemented".to_string())
+ }
+
+ async fn stop_external_backend(&self, _instance_id: &str) -> Result<(), String> {
+ // TODO: Implement external backend shutdown
+ Err("External backend shutdown not yet implemented".to_string())
+ }
+
+ async fn check_external_backend_health(&self, _instance: &BackendInstance) -> Result {
+ // TODO: Implement health check for external backends
+ Ok(HealthStatus::Unknown)
+ }
+
+ async fn update_instance_status(&self, instance_id: &str, status: BackendStatus) {
+ if let Some(instance) = self.instances.write().await.get_mut(instance_id) {
+ instance.status = status;
+ if status == BackendStatus::Running {
+ instance.started_at = Some(Utc::now());
+ }
+ }
+ }
+}
+
+/// Backend statistics
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackendStats {
+ pub total_backends: usize,
+ pub installed_backends: usize,
+ pub running_instances: usize,
+ pub active_backend: Option,
+ pub health_summary: HashMap,
+}
\ No newline at end of file
diff --git a/tauri/src-tauri/src/cast.rs b/tauri/src-tauri/src/cast.rs
new file mode 100644
index 00000000..c7a770f2
--- /dev/null
+++ b/tauri/src-tauri/src/cast.rs
@@ -0,0 +1,364 @@
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::{BufWriter, Write};
+use std::path::Path;
+use std::sync::Arc;
+use tokio::sync::Mutex;
+use chrono::{DateTime, Utc};
+
+/// Asciinema cast v2 format header
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CastHeader {
+ pub version: u8,
+ pub width: u16,
+ pub height: u16,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub timestamp: Option,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub duration: Option,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub idle_time_limit: Option,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub command: Option,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub title: Option,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub env: Option>,
+}
+
/// Event types for the Asciinema cast format.
///
/// The cast v2 format tags each event line with a single-character code:
/// `"o"` for terminal output, `"i"` for user input.
#[derive(Debug, Clone, Copy)]
pub enum EventType {
    Output,
    Input,
}

impl EventType {
    /// Single-character code used in the cast file's event arrays.
    fn as_str(&self) -> &'static str {
        match self {
            Self::Output => "o",
            Self::Input => "i",
        }
    }
}
+
/// A single event in the cast file: serialized as the JSON array
/// `[timestamp, event_type, data]`.
#[derive(Debug)]
pub struct CastEvent {
    // Seconds since the start of the recording.
    pub timestamp: f64,
    pub event_type: EventType,
    // Event payload, lossily converted from raw bytes to UTF-8.
    pub data: String,
}
+
+/// Handles recording terminal sessions in Asciinema cast format
+pub struct CastRecorder {
+ header: CastHeader,
+ start_time: DateTime,
+ events: Arc>>,
+ file_writer: Option>>>,
+ is_recording: Arc>,
+}
+
+impl CastRecorder {
+ /// Create a new cast recorder
+ pub fn new(
+ width: u16,
+ height: u16,
+ title: Option,
+ command: Option,
+ ) -> Self {
+ let now = Utc::now();
+ let header = CastHeader {
+ version: 2,
+ width,
+ height,
+ timestamp: Some(now.timestamp()),
+ duration: None,
+ idle_time_limit: None,
+ command,
+ title,
+ env: None,
+ };
+
+ Self {
+ header,
+ start_time: now,
+ events: Arc::new(Mutex::new(Vec::new())),
+ file_writer: None,
+ is_recording: Arc::new(Mutex::new(false)),
+ }
+ }
+
+ /// Start recording to a file
+ pub async fn start_recording(&mut self, path: impl AsRef) -> Result<(), String> {
+ let mut is_recording = self.is_recording.lock().await;
+ if *is_recording {
+ return Err("Already recording".to_string());
+ }
+
+ // Create file and write header
+ let file = File::create(path.as_ref())
+ .map_err(|e| format!("Failed to create cast file: {}", e))?;
+ let mut writer = BufWriter::new(file);
+
+ // Write header as first line
+ let header_json = serde_json::to_string(&self.header)
+ .map_err(|e| format!("Failed to serialize header: {}", e))?;
+ writeln!(writer, "{}", header_json)
+ .map_err(|e| format!("Failed to write header: {}", e))?;
+
+ // Write any existing events
+ let events = self.events.lock().await;
+ for event in events.iter() {
+ self.write_event_to_file(&mut writer, event)?;
+ }
+
+ writer.flush()
+ .map_err(|e| format!("Failed to flush writer: {}", e))?;
+
+ self.file_writer = Some(Arc::new(Mutex::new(writer)));
+ *is_recording = true;
+ Ok(())
+ }
+
+ /// Stop recording
+ pub async fn stop_recording(&mut self) -> Result<(), String> {
+ let mut is_recording = self.is_recording.lock().await;
+ if !*is_recording {
+ return Ok(());
+ }
+
+ if let Some(writer_arc) = self.file_writer.take() {
+ let mut writer = writer_arc.lock().await;
+ writer.flush()
+ .map_err(|e| format!("Failed to flush final data: {}", e))?;
+ }
+
+ *is_recording = false;
+ Ok(())
+ }
+
+ /// Add output data to the recording
+ pub async fn add_output(&self, data: &[u8]) -> Result<(), String> {
+ self.add_event(EventType::Output, data).await
+ }
+
+ /// Add input data to the recording
+ pub async fn add_input(&self, data: &[u8]) -> Result<(), String> {
+ self.add_event(EventType::Input, data).await
+ }
+
+ /// Add an event to the recording
+ async fn add_event(&self, event_type: EventType, data: &[u8]) -> Result<(), String> {
+ let timestamp = Utc::now()
+ .signed_duration_since(self.start_time)
+ .num_milliseconds() as f64 / 1000.0;
+
+ // Convert data to string (handling potential UTF-8 errors)
+ let data_string = String::from_utf8_lossy(data).to_string();
+
+ let event = CastEvent {
+ timestamp,
+ event_type,
+ data: data_string,
+ };
+
+ // If we have a file writer, write immediately
+ if let Some(writer_arc) = &self.file_writer {
+ let mut writer = writer_arc.lock().await;
+ self.write_event_to_file(&mut writer, &event)?;
+ writer.flush()
+ .map_err(|e| format!("Failed to flush event: {}", e))?;
+ }
+
+ // Also store in memory
+ let mut events = self.events.lock().await;
+ events.push(event);
+
+ Ok(())
+ }
+
+ /// Write one event as a cast-format JSON array line:
+ /// [timestamp, event_type, data].
+ fn write_event_to_file(
+ &self,
+ // Generic parameter restored: save_to_file/start_recording wrap a
+ // std::fs::File in this BufWriter.
+ writer: &mut BufWriter<File>,
+ event: &CastEvent,
+ ) -> Result<(), String> {
+ let event_array = serde_json::json!([
+ event.timestamp,
+ event.event_type.as_str(),
+ event.data
+ ]);
+
+ writeln!(writer, "{}", event_array)
+ .map_err(|e| format!("Failed to write event: {}", e))?;
+
+ Ok(())
+ }
+
+ /// Save all recorded events to a file.
+ ///
+ /// Writes a complete cast: the JSON header (duration filled in from the
+ /// last event's timestamp) followed by one JSON array line per event.
+ pub async fn save_to_file(&self, path: impl AsRef<Path>) -> Result<(), String> {
+ let file = File::create(path.as_ref())
+ .map_err(|e| format!("Failed to create cast file: {}", e))?;
+ let mut writer = BufWriter::new(file);
+
+ // Duration is the timestamp of the final event (None when empty).
+ let events = self.events.lock().await;
+ let duration = events.last().map(|e| e.timestamp);
+
+ // Clone the header so this recorder's own copy stays untouched.
+ let mut header = self.header.clone();
+ header.duration = duration;
+
+ // Write header
+ let header_json = serde_json::to_string(&header)
+ .map_err(|e| format!("Failed to serialize header: {}", e))?;
+ writeln!(writer, "{}", header_json)
+ .map_err(|e| format!("Failed to write header: {}", e))?;
+
+ // Write events
+ for event in events.iter() {
+ self.write_event_to_file(&mut writer, event)?;
+ }
+
+ writer.flush()
+ .map_err(|e| format!("Failed to flush file: {}", e))?;
+
+ Ok(())
+ }
+
+ /// Current recording duration in seconds: the timestamp of the most
+ /// recent event, or 0.0 when nothing has been recorded yet.
+ pub async fn get_duration(&self) -> f64 {
+ self.events.lock().await.last().map_or(0.0, |e| e.timestamp)
+ }
+
+ /// Whether a recording is currently in progress.
+ pub async fn is_recording(&self) -> bool {
+ *self.is_recording.lock().await
+ }
+
+ /// Update the header's terminal dimensions.
+ ///
+ /// NOTE(review): only the header is mutated; no resize event is
+ /// appended to the event stream.
+ pub async fn resize(&mut self, width: u16, height: u16) {
+ self.header.width = width;
+ self.header.height = height;
+ }
+}
+
+/// Manages cast recordings for multiple sessions.
+pub struct CastManager {
+ // Generic parameters restored: session id -> shared, independently
+ // lockable recorder (matches get_recorder's cloned Arc<Mutex<_>>).
+ recorders: Arc<Mutex<HashMap<String, Arc<Mutex<CastRecorder>>>>>,
+}
+
+impl CastManager {
+ /// Create an empty manager with no recorders.
+ pub fn new() -> Self {
+ Self {
+ recorders: Arc::new(Mutex::new(HashMap::new())),
+ }
+ }
+
+ /// Create a new recorder for a session.
+ ///
+ /// Errors when a recorder already exists for `session_id`.
+ pub async fn create_recorder(
+ &self,
+ session_id: String,
+ width: u16,
+ height: u16,
+ title: Option<String>,
+ command: Option<String>,
+ ) -> Result<(), String> {
+ let mut recorders = self.recorders.lock().await;
+ if recorders.contains_key(&session_id) {
+ return Err("Recorder already exists for this session".to_string());
+ }
+
+ let recorder = CastRecorder::new(width, height, title, command);
+ recorders.insert(session_id, Arc::new(Mutex::new(recorder)));
+ Ok(())
+ }
+
+ /// Get a recorder for a session (cheap Arc clone).
+ pub async fn get_recorder(&self, session_id: &str) -> Option<Arc<Mutex<CastRecorder>>> {
+ self.recorders.lock().await.get(session_id).cloned()
+ }
+
+ /// Remove a recorder for a session, stopping any active recording first.
+ pub async fn remove_recorder(&self, session_id: &str) -> Result<(), String> {
+ let mut recorders = self.recorders.lock().await;
+ if let Some(recorder_arc) = recorders.remove(session_id) {
+ let mut recorder = recorder_arc.lock().await;
+ recorder.stop_recording().await?;
+ }
+ Ok(())
+ }
+
+ /// Start recording a session to `path`.
+ pub async fn start_recording(
+ &self,
+ session_id: &str,
+ path: impl AsRef<Path>,
+ ) -> Result<(), String> {
+ if let Some(recorder_arc) = self.get_recorder(session_id).await {
+ let mut recorder = recorder_arc.lock().await;
+ recorder.start_recording(path).await
+ } else {
+ Err("No recorder found for session".to_string())
+ }
+ }
+
+ /// Stop recording for a session.
+ pub async fn stop_recording(&self, session_id: &str) -> Result<(), String> {
+ if let Some(recorder_arc) = self.get_recorder(session_id).await {
+ let mut recorder = recorder_arc.lock().await;
+ recorder.stop_recording().await
+ } else {
+ Err("No recorder found for session".to_string())
+ }
+ }
+
+ /// Add output to a session's recording.
+ pub async fn add_output(&self, session_id: &str, data: &[u8]) -> Result<(), String> {
+ if let Some(recorder_arc) = self.get_recorder(session_id).await {
+ let recorder = recorder_arc.lock().await;
+ recorder.add_output(data).await
+ } else {
+ Ok(()) // Silently ignore if no recorder
+ }
+ }
+
+ /// Add input to a session's recording.
+ pub async fn add_input(&self, session_id: &str, data: &[u8]) -> Result<(), String> {
+ if let Some(recorder_arc) = self.get_recorder(session_id).await {
+ let recorder = recorder_arc.lock().await;
+ recorder.add_input(data).await
+ } else {
+ Ok(()) // Silently ignore if no recorder
+ }
+ }
+
+ /// Save a session's recording to file.
+ pub async fn save_recording(
+ &self,
+ session_id: &str,
+ path: impl AsRef<Path>,
+ ) -> Result<(), String> {
+ if let Some(recorder_arc) = self.get_recorder(session_id).await {
+ let recorder = recorder_arc.lock().await;
+ recorder.save_to_file(path).await
+ } else {
+ Err("No recorder found for session".to_string())
+ }
+ }
+
+ /// Check if a session is being recorded (false when no recorder exists).
+ pub async fn is_recording(&self, session_id: &str) -> bool {
+ if let Some(recorder_arc) = self.get_recorder(session_id).await {
+ let recorder = recorder_arc.lock().await;
+ recorder.is_recording().await
+ } else {
+ false
+ }
+ }
+}
\ No newline at end of file
diff --git a/tauri/src-tauri/src/commands.rs b/tauri/src-tauri/src/commands.rs
index 60a0b7ba..f16aeab7 100644
--- a/tauri/src-tauri/src/commands.rs
+++ b/tauri/src-tauri/src/commands.rs
@@ -125,9 +125,9 @@ pub async fn start_server(
// Start HTTP server with auth if configured
let mut http_server = if settings.dashboard.enable_password && !settings.dashboard.password.is_empty() {
let auth_config = crate::auth::AuthConfig::new(true, Some(settings.dashboard.password));
- HttpServer::with_auth(state.terminal_manager.clone(), auth_config)
+ HttpServer::with_auth(state.terminal_manager.clone(), state.session_monitor.clone(), auth_config)
} else {
- HttpServer::new(state.terminal_manager.clone())
+ HttpServer::new(state.terminal_manager.clone(), state.session_monitor.clone())
};
// Start server with appropriate access mode
@@ -246,9 +246,10 @@ pub async fn restart_server(
// Start a new server
let terminal_manager = state.terminal_manager.clone();
+ let session_monitor = state.session_monitor.clone();
let settings = crate::settings::Settings::load().unwrap_or_default();
- let mut new_server = HttpServer::new(terminal_manager);
+ let mut new_server = HttpServer::new(terminal_manager, session_monitor);
new_server.start_with_mode(match settings.dashboard.access_mode.as_str() {
"network" => "network",
_ => "localhost"
@@ -274,9 +275,11 @@ pub async fn show_server_console(
"server-console",
tauri::WebviewUrl::App("server-console.html".into())
)
- .title("Server Console")
- .inner_size(800.0, 600.0)
+ .title("Server Console - VibeTunnel")
+ .inner_size(900.0, 600.0)
.resizable(true)
+ .decorations(true)
+ .center()
.build()
.map_err(|e| e.to_string())?;
}
@@ -286,27 +289,10 @@ pub async fn show_server_console(
#[tauri::command]
pub async fn show_welcome_screen(
- app_handle: tauri::AppHandle,
+ state: State<'_, AppState>,
) -> Result<(), String> {
- // Check if welcome window already exists
- if let Some(window) = app_handle.get_webview_window("welcome") {
- window.show().map_err(|e| e.to_string())?;
- window.set_focus().map_err(|e| e.to_string())?;
- } else {
- // Create new welcome window
- tauri::WebviewWindowBuilder::new(
- &app_handle,
- "welcome",
- tauri::WebviewUrl::App("welcome.html".into())
- )
- .title("Welcome to VibeTunnel")
- .inner_size(700.0, 500.0)
- .resizable(false)
- .build()
- .map_err(|e| e.to_string())?;
- }
-
- Ok(())
+ let welcome_manager = &state.welcome_manager;
+ welcome_manager.show_welcome_window().await
}
#[tauri::command]
@@ -346,4 +332,1701 @@ pub async fn update_dock_icon_visibility(app_handle: tauri::AppHandle) -> Result
}
}
Ok(())
+}
+
+// Terminal Recording Commands
+#[derive(Debug, Serialize, Deserialize)]
+pub struct StartRecordingOptions {
+ pub session_id: String,
+ // Generic restored: recording title; falls back to the session name.
+ pub title: Option<String>,
+ // Generic restored: destination path; temp-dir default when absent.
+ pub output_path: Option<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct RecordingStatus {
+ // True while the session has an active recording.
+ pub is_recording: bool,
+ // Elapsed recording time in seconds (0.0 when no recorder exists).
+ pub duration: f64,
+}
+
+/// Start recording a terminal session to a cast file.
+///
+/// Creates the session's recorder on demand, then records either to the
+/// caller-supplied path or to a timestamped file in the temp directory.
+#[tauri::command]
+pub async fn start_terminal_recording(
+ options: StartRecordingOptions,
+ state: State<'_, AppState>,
+) -> Result<(), String> {
+ let cast_manager = &state.cast_manager;
+
+ // Look up the session so the cast header records its size and name.
+ let terminal_manager = &state.terminal_manager;
+ let sessions = terminal_manager.list_sessions().await;
+ let session = sessions.iter()
+ .find(|s| s.id == options.session_id)
+ .ok_or_else(|| "Session not found".to_string())?;
+
+ // Create recorder if it doesn't exist
+ cast_manager.create_recorder(
+ options.session_id.clone(),
+ session.cols,
+ session.rows,
+ // or_else: only clone the session name when no title was given.
+ options.title.or_else(|| Some(session.name.clone())),
+ None, // command
+ ).await.ok(); // Ignore if already exists
+
+ // Start recording
+ if let Some(path) = options.output_path {
+ cast_manager.start_recording(&options.session_id, path).await
+ } else {
+ // Default: vibetunnel_recording_<UTC timestamp>.cast in the temp dir.
+ let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
+ let filename = format!("vibetunnel_recording_{}.cast", timestamp);
+ let path = std::env::temp_dir().join(filename);
+ cast_manager.start_recording(&options.session_id, path).await
+ }
+}
+
+/// Stop the active recording for a session, if any.
+#[tauri::command]
+pub async fn stop_terminal_recording(
+ session_id: String,
+ state: State<'_, AppState>,
+) -> Result<(), String> {
+ state.cast_manager.stop_recording(&session_id).await
+}
+
+/// Save a session's recorded events to `output_path` as a cast file.
+#[tauri::command]
+pub async fn save_terminal_recording(
+ session_id: String,
+ output_path: String,
+ state: State<'_, AppState>,
+) -> Result<(), String> {
+ state.cast_manager.save_recording(&session_id, output_path).await
+}
+
+/// Report whether a session is being recorded and its current duration.
+#[tauri::command]
+pub async fn get_recording_status(
+ session_id: String,
+ state: State<'_, AppState>,
+) -> Result<RecordingStatus, String> {
+ let cast_manager = &state.cast_manager;
+ let is_recording = cast_manager.is_recording(&session_id).await;
+
+ // Duration is 0 when the session has no recorder at all.
+ let duration = if let Some(recorder) = cast_manager.get_recorder(&session_id).await {
+ let rec = recorder.lock().await;
+ rec.get_duration().await
+ } else {
+ 0.0
+ };
+
+ Ok(RecordingStatus {
+ is_recording,
+ duration,
+ })
+}
+
+// TTY Forwarding Commands
+#[derive(Debug, Serialize, Deserialize)]
+pub struct StartTTYForwardOptions {
+ pub local_port: u16,
+ // Generic restored: defaults to "localhost" when absent.
+ pub remote_host: Option<String>,
+ // Generic restored: defaults to 22 when absent.
+ pub remote_port: Option<u16>,
+ pub shell: Option<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct TTYForwardInfo {
+ // Identifier of the forward; stop_tty_forward takes this id.
+ pub id: String,
+ pub local_port: u16,
+ pub remote_host: String,
+ pub remote_port: u16,
+ pub connected: bool,
+ pub client_count: usize,
+}
+
+/// Start a TTY forward and return its identifier.
+///
+/// Return type restored as Result<String, String>: the Ok value is the
+/// forward id that stop_tty_forward accepts — TODO confirm against
+/// TTYForwardManager::start_forward.
+#[tauri::command]
+pub async fn start_tty_forward(
+ options: StartTTYForwardOptions,
+ state: State<'_, AppState>,
+) -> Result<String, String> {
+ let tty_forward_manager = &state.tty_forward_manager;
+
+ // Conventional SSH target when host/port are omitted.
+ let remote_host = options.remote_host.unwrap_or_else(|| "localhost".to_string());
+ let remote_port = options.remote_port.unwrap_or(22);
+
+ tty_forward_manager.start_forward(
+ options.local_port,
+ remote_host,
+ remote_port,
+ options.shell,
+ ).await
+}
+
+/// Stop a TTY forward by its id.
+#[tauri::command]
+pub async fn stop_tty_forward(
+ id: String,
+ state: State<'_, AppState>,
+) -> Result<(), String> {
+ state.tty_forward_manager.stop_forward(&id).await
+}
+
+/// List all active TTY forwards as serializable info records.
+#[tauri::command]
+pub async fn list_tty_forwards(
+ state: State<'_, AppState>,
+) -> Result<Vec<TTYForwardInfo>, String> {
+ let tty_forward_manager = &state.tty_forward_manager;
+ let forwards = tty_forward_manager.list_forwards().await;
+
+ // Map the manager's internal records into the wire-format struct.
+ Ok(forwards.into_iter().map(|f| TTYForwardInfo {
+ id: f.id,
+ local_port: f.local_port,
+ remote_host: f.remote_host,
+ remote_port: f.remote_port,
+ connected: f.connected,
+ client_count: f.client_count,
+ }).collect())
+}
+
+#[tauri::command]
+pub async fn get_tty_forward(
+ id: String,
+ state: State<'_, AppState>,
+) -> Result