From f4679eecc683b7e3df87ba6749d0dff64798e656 Mon Sep 17 00:00:00 2001 From: Drew Newberry Date: Tue, 28 Apr 2026 08:26:26 -0700 Subject: [PATCH 1/4] ci(rust): enforce -D warnings on clippy Promote cargo clippy from warning-only to error-on-warning across the workspace and the e2e crate. Resolves all existing clippy lints surfaced by the workspace pedantic/nursery config so CI can gate on a clean lint run. Most fixes are mechanical (inline format args, collapse if-let chains, Option::map_or_else, From::from for lossless casts, doc backticks, let-else, raw string hash trimming). A small number of narrow, function-scoped #[allow(...)] attributes were added with comments where clippy's suggestion would change a public/trait signature, FFI/unsafe contracts, or a serde-deserialized protocol shape; no new entries were added to the workspace-level allow list in Cargo.toml. --- crates/openshell-bootstrap/src/docker.rs | 64 ++-- crates/openshell-bootstrap/src/lib.rs | 17 +- crates/openshell-bootstrap/src/metadata.rs | 11 +- crates/openshell-cli/src/auth.rs | 8 +- crates/openshell-cli/src/bootstrap.rs | 2 +- crates/openshell-cli/src/completers.rs | 10 +- crates/openshell-cli/src/main.rs | 28 +- crates/openshell-cli/src/policy_update.rs | 16 +- crates/openshell-cli/src/run.rs | 230 ++++++------ crates/openshell-cli/src/ssh.rs | 78 ++--- .../tests/ensure_providers_integration.rs | 25 +- .../openshell-cli/tests/mtls_integration.rs | 9 +- .../tests/provider_commands_integration.rs | 25 +- .../sandbox_create_lifecycle_integration.rs | 9 +- .../sandbox_name_fallback_integration.rs | 9 +- crates/openshell-core/src/config.rs | 4 +- crates/openshell-core/src/error.rs | 6 +- crates/openshell-core/src/forward.rs | 18 +- crates/openshell-core/src/inference.rs | 10 +- crates/openshell-core/src/lib.rs | 2 +- crates/openshell-core/src/metadata.rs | 30 +- crates/openshell-core/src/net.rs | 8 +- .../openshell-driver-kubernetes/src/driver.rs | 35 +- 
crates/openshell-driver-podman/src/client.rs | 42 ++- .../openshell-driver-podman/src/container.rs | 23 +- crates/openshell-driver-podman/src/driver.rs | 21 +- crates/openshell-driver-podman/src/grpc.rs | 13 +- crates/openshell-driver-podman/src/watcher.rs | 27 +- crates/openshell-driver-vm/build.rs | 4 +- crates/openshell-driver-vm/src/driver.rs | 41 ++- .../src/embedded_runtime.rs | 7 +- crates/openshell-driver-vm/src/ffi.rs | 8 +- crates/openshell-driver-vm/src/procguard.rs | 12 +- crates/openshell-driver-vm/src/runtime.rs | 64 ++-- crates/openshell-ocsf/src/format/shorthand.rs | 1 - .../src/tracing_layers/jsonl_layer.rs | 8 +- .../src/tracing_layers/shorthand_layer.rs | 28 +- crates/openshell-policy/src/lib.rs | 52 +-- crates/openshell-policy/src/merge.rs | 14 +- crates/openshell-prover/src/lib.rs | 30 +- crates/openshell-prover/src/model.rs | 23 +- crates/openshell-prover/src/policy.rs | 18 +- crates/openshell-prover/src/queries.rs | 3 +- crates/openshell-prover/src/registry.rs | 13 +- crates/openshell-router/src/backend.rs | 30 +- crates/openshell-router/src/config.rs | 13 +- .../openshell-sandbox/src/bypass_monitor.rs | 19 +- crates/openshell-sandbox/src/child_env.rs | 6 +- crates/openshell-sandbox/src/grpc_client.rs | 16 +- crates/openshell-sandbox/src/l7/mod.rs | 33 +- crates/openshell-sandbox/src/l7/path.rs | 17 +- crates/openshell-sandbox/src/l7/relay.rs | 9 +- crates/openshell-sandbox/src/l7/rest.rs | 26 +- crates/openshell-sandbox/src/l7/tls.rs | 4 +- crates/openshell-sandbox/src/lib.rs | 303 ++++++++-------- crates/openshell-sandbox/src/log_push.rs | 6 +- crates/openshell-sandbox/src/main.rs | 8 +- .../src/mechanistic_mapper.rs | 21 +- crates/openshell-sandbox/src/opa.rs | 47 +-- crates/openshell-sandbox/src/process.rs | 19 +- crates/openshell-sandbox/src/procfs.rs | 2 +- crates/openshell-sandbox/src/proxy.rs | 74 ++-- crates/openshell-sandbox/src/secrets.rs | 132 ++++--- crates/openshell-sandbox/src/ssh.rs | 33 +- .../tests/websocket_upgrade.rs | 7 + 
crates/openshell-server/src/auth.rs | 49 ++- crates/openshell-server/src/cli.rs | 2 +- crates/openshell-server/src/compute/mod.rs | 25 +- crates/openshell-server/src/compute/vm.rs | 32 +- crates/openshell-server/src/grpc/mod.rs | 6 +- crates/openshell-server/src/grpc/policy.rs | 76 ++-- crates/openshell-server/src/grpc/provider.rs | 26 +- crates/openshell-server/src/grpc/sandbox.rs | 28 +- .../openshell-server/src/grpc/validation.rs | 15 +- crates/openshell-server/src/inference.rs | 21 +- crates/openshell-server/src/lib.rs | 28 +- .../openshell-server/src/persistence/mod.rs | 13 +- .../src/persistence/postgres.rs | 2 +- .../src/persistence/sqlite.rs | 2 +- .../openshell-server/src/persistence/tests.rs | 4 +- crates/openshell-server/src/sandbox_watch.rs | 20 +- crates/openshell-server/src/ssh_tunnel.rs | 35 +- .../src/supervisor_session.rs | 40 ++- .../tests/edge_tunnel_auth.rs | 22 +- .../tests/multiplex_integration.rs | 8 +- .../tests/multiplex_tls_integration.rs | 8 +- .../tests/ws_tunnel_integration.rs | 11 +- crates/openshell-tui/src/app.rs | 34 +- crates/openshell-tui/src/event.rs | 6 +- crates/openshell-tui/src/lib.rs | 326 +++++++++--------- crates/openshell-tui/src/theme.rs | 2 +- .../openshell-tui/src/ui/create_provider.rs | 5 +- crates/openshell-tui/src/ui/create_sandbox.rs | 8 +- .../openshell-tui/src/ui/global_settings.rs | 4 +- crates/openshell-tui/src/ui/mod.rs | 33 +- crates/openshell-tui/src/ui/sandbox_detail.rs | 6 +- crates/openshell-tui/src/ui/sandbox_draft.rs | 2 +- crates/openshell-tui/src/ui/sandbox_logs.rs | 36 +- .../openshell-tui/src/ui/sandbox_settings.rs | 6 +- crates/openshell-vm/build.rs | 4 +- crates/openshell-vm/src/embedded.rs | 24 +- crates/openshell-vm/src/exec.rs | 31 +- crates/openshell-vm/src/ffi.rs | 6 +- crates/openshell-vm/src/health.rs | 5 +- crates/openshell-vm/src/lib.rs | 81 ++--- crates/openshell-vm/src/main.rs | 50 +-- .../openshell-vm/tests/gateway_integration.rs | 10 +- e2e/rust/src/harness/binary.rs | 3 +- 
e2e/rust/src/harness/sandbox.rs | 8 +- e2e/rust/tests/cf_auth_smoke.rs | 2 +- e2e/rust/tests/docker_preflight.rs | 2 +- e2e/rust/tests/sandbox_labels.rs | 3 +- tasks/rust.toml | 7 +- 113 files changed, 1594 insertions(+), 1514 deletions(-) diff --git a/crates/openshell-bootstrap/src/docker.rs b/crates/openshell-bootstrap/src/docker.rs index 65482739f..c0a4459a9 100644 --- a/crates/openshell-bootstrap/src/docker.rs +++ b/crates/openshell-bootstrap/src/docker.rs @@ -28,7 +28,7 @@ const REGISTRY_NAMESPACE_DEFAULT: &str = "openshell"; /// `connect_with_local_defaults()` ceiling is 120s, which is far too short for /// multi-GB image exports — a 7 GB image on a laptop SSD takes ~4–5 minutes. /// One hour is a safe upper bound; override with `OPENSHELL_DOCKER_TIMEOUT_SECS`. -pub(crate) const DEFAULT_LARGE_TRANSFER_TIMEOUT_SECS: u64 = 3600; +pub const DEFAULT_LARGE_TRANSFER_TIMEOUT_SECS: u64 = 3600; /// Build a local-Docker client suitable for large streaming transfers. /// Respects `OPENSHELL_DOCKER_TIMEOUT_SECS` (in seconds); falls back to @@ -50,7 +50,7 @@ pub fn connect_local_for_large_transfers() -> std::result::Result Vec { +pub fn resolve_gpu_device_ids(gpu: &[String], cdi_enabled: bool) -> Vec { match gpu { [] => vec![], [v] if v == "auto" => { @@ -346,22 +346,25 @@ pub async fn find_gateway_container(docker: &Docker, port: Option) -> Resul let matches: Vec = containers .iter() - .filter(|c| is_gateway_image(c) && port.map_or(true, |p| has_port(c, p))) + .filter(|c| is_gateway_image(c) && port.is_none_or(|p| has_port(c, p))) .filter_map(container_name) .collect(); match matches.len() { 0 => { - let hint = if let Some(p) = port { - format!( - "No openshell gateway container found listening on port {p}.\n\ + let hint = port.map_or_else( + || { + "No openshell gateway container found.\n\ Is the gateway running? Check with: docker ps" - ) - } else { - "No openshell gateway container found.\n\ - Is the gateway running? 
Check with: docker ps" - .to_string() - }; + .to_string() + }, + |p| { + format!( + "No openshell gateway container found listening on port {p}.\n\ + Is the gateway running? Check with: docker ps" + ) + }, + ); Err(miette::miette!("{hint}")) } 1 => Ok(matches.into_iter().next().unwrap()), @@ -488,6 +491,8 @@ pub async fn ensure_image( /// Returns the actual host port the container is using. When an existing /// container is reused (same image), this may differ from `gateway_port` /// because the container was originally created with a different port. +// Refactoring this signature would touch many call sites across the workspace. +#[allow(clippy::too_many_arguments)] pub async fn ensure_container( docker: &Docker, name: &str, @@ -863,22 +868,22 @@ pub async fn check_port_conflicts( let ports = container.ports.as_deref().unwrap_or_default(); for port in ports { - if let Some(public) = port.public_port { - if needed_ports.contains(&public) { - let cname = names - .first() - .map(|n| n.trim_start_matches('/').to_string()) - .unwrap_or_else(|| { - container - .id - .clone() - .unwrap_or_else(|| "".to_string()) - }); - conflicts.push(PortConflict { - container_name: cname, - host_port: public, - }); - } + if let Some(public) = port.public_port + && needed_ports.contains(&public) + { + let cname = names.first().map_or_else( + || { + container + .id + .clone() + .unwrap_or_else(|| "".to_string()) + }, + |n| n.trim_start_matches('/').to_string(), + ); + conflicts.push(PortConflict { + container_name: cname, + host_port: public, + }); } } } @@ -1371,6 +1376,9 @@ mod tests { ); } + // Test-only: mutates DOCKER_HOST env var via std::env::set_var/remove_var, + // which require unsafe in the 2024 edition. + #[allow(unsafe_code)] #[test] fn docker_not_reachable_error_with_docker_host() { // Simulate: DOCKER_HOST is set but daemon unresponsive. 
diff --git a/crates/openshell-bootstrap/src/lib.rs b/crates/openshell-bootstrap/src/lib.rs index 53f659fc6..5f38a1251 100644 --- a/crates/openshell-bootstrap/src/lib.rs +++ b/crates/openshell-bootstrap/src/lib.rs @@ -117,7 +117,7 @@ pub struct DeployOptions { /// - `[]` — no GPU passthrough (default) /// - `["legacy"]` — internal non-CDI fallback path (`driver="nvidia"`, `count=-1`) /// - `["auto"]` — resolved at deploy time: CDI if enabled on the daemon, else the non-CDI fallback - /// - `[cdi-ids…]` — CDI DeviceRequest with the given device IDs + /// - `[cdi-ids…]` — CDI `DeviceRequest` with the given device IDs pub gpu: Vec, /// When true, destroy any existing gateway resources before deploying. /// When false, an existing gateway is left as-is and deployment is @@ -340,8 +340,8 @@ where // the image to recreate the container, so the pull must happen. let need_image = !resume || !resume_container_exists; if need_image { + log("[status] Downloading gateway".to_string()); if remote_opts.is_some() { - log("[status] Downloading gateway".to_string()); let on_log_clone = Arc::clone(&on_log); let progress_cb = move |msg: String| { if let Ok(mut f) = on_log_clone.lock() { @@ -358,7 +358,6 @@ where .await?; } else { // Local deployment: ensure image exists (pull if needed) - log("[status] Downloading gateway".to_string()); ensure_image( &target_docker, &image_ref, @@ -732,16 +731,16 @@ pub async fn gateway_container_logs( Ok(()) } -/// Fetch the last `n` lines of container logs for a local gateway as a -/// `String`. This is a convenience wrapper for diagnostic call sites (e.g. -/// failure diagnosis in the CLI) that do not hold a Docker client handle. +/// Fetch the last `n` lines of container logs for a local gateway as a `String`. +/// +/// This is a convenience wrapper for diagnostic call sites (e.g. failure +/// diagnosis in the CLI) that do not hold a Docker client handle. 
/// /// Returns an empty string on any Docker/connection error so callers don't /// need to worry about error handling. pub async fn fetch_gateway_logs(name: &str, n: usize) -> String { - let docker = match Docker::connect_with_local_defaults() { - Ok(d) => d, - Err(_) => return String::new(), + let Ok(docker) = Docker::connect_with_local_defaults() else { + return String::new(); }; let container = container_name(name); fetch_recent_logs(&docker, &container, n).await diff --git a/crates/openshell-bootstrap/src/metadata.rs b/crates/openshell-bootstrap/src/metadata.rs index 8e6b8a070..41e75e811 100644 --- a/crates/openshell-bootstrap/src/metadata.rs +++ b/crates/openshell-bootstrap/src/metadata.rs @@ -305,12 +305,11 @@ pub fn load_last_sandbox(gateway: &str) -> Option { /// This should be called after a sandbox is deleted so that subsequent commands /// don't try to connect to a sandbox that no longer exists. pub fn clear_last_sandbox_if_matches(gateway: &str, sandbox: &str) { - if let Some(current) = load_last_sandbox(gateway) { - if current == sandbox { - if let Ok(path) = last_sandbox_path(gateway) { - let _ = std::fs::remove_file(path); - } - } + if let Some(current) = load_last_sandbox(gateway) + && current == sandbox + && let Ok(path) = last_sandbox_path(gateway) + { + let _ = std::fs::remove_file(path); } } diff --git a/crates/openshell-cli/src/auth.rs b/crates/openshell-cli/src/auth.rs index e961828c4..509679f33 100644 --- a/crates/openshell-cli/src/auth.rs +++ b/crates/openshell-cli/src/auth.rs @@ -72,6 +72,9 @@ fn generate_confirmation_code() -> String { let hash_b = hasher_b.finish(); prev_hash = hash_b; + // hash_b is `u64`; truncation to `usize` is acceptable here since we mod + // by charset.len() (small) and only use it as an index. 
+ #[allow(clippy::cast_possible_truncation)] let idx = (hash_b as usize) % charset.len(); code.push(charset[idx] as char); } @@ -129,8 +132,9 @@ pub async fn browser_auth_flow(gateway_endpoint: &str) -> Result { eprint!("Press Enter to open the browser for authentication..."); std::io::stderr().flush().ok(); - let mut _input = String::new(); - std::io::stdin().read_line(&mut _input).ok(); + let mut input = String::new(); + std::io::stdin().read_line(&mut input).ok(); + drop(input); if let Err(e) = open_browser(&auth_url) { debug!(error = %e, "failed to open browser"); diff --git a/crates/openshell-cli/src/bootstrap.rs b/crates/openshell-cli/src/bootstrap.rs index ee9a481aa..d6245a760 100644 --- a/crates/openshell-cli/src/bootstrap.rs +++ b/crates/openshell-cli/src/bootstrap.rs @@ -101,7 +101,7 @@ fn is_connectivity_error(error: &miette::Report) -> bool { /// `false` to skip bootstrap. Otherwise returns `true` — a gateway is created /// automatically without prompting the user. pub fn confirm_bootstrap(override_value: Option) -> Result { - if let Some(false) = override_value { + if override_value == Some(false) { return Ok(false); } Ok(true) diff --git a/crates/openshell-cli/src/completers.rs b/crates/openshell-cli/src/completers.rs index 257ceb9ff..c8b5c82a3 100644 --- a/crates/openshell-cli/src/completers.rs +++ b/crates/openshell-cli/src/completers.rs @@ -188,11 +188,11 @@ mod tests { .unwrap(); let result = complete_gateway_names(OsStr::new("a")); - let names: Vec = result - .iter() - .map(|candidate| candidate.get_value().to_string_lossy().into_owned()) - .collect(); - assert!(names.contains(&"alpha".to_string())); + assert!( + result + .iter() + .any(|candidate| candidate.get_value().to_string_lossy() == "alpha") + ); }); } } diff --git a/crates/openshell-cli/src/main.rs b/crates/openshell-cli/src/main.rs index 385399312..89940ef5b 100644 --- a/crates/openshell-cli/src/main.rs +++ b/crates/openshell-cli/src/main.rs @@ -816,7 +816,7 @@ enum GatewayCommands { 
/// `nvidia.com/gpu` resources. Requires NVIDIA drivers and the /// NVIDIA Container Toolkit on the host. /// - /// When enabled, OpenShell auto-selects CDI when the Docker daemon has + /// When enabled, `OpenShell` auto-selects CDI when the Docker daemon has /// CDI enabled and falls back to Docker's NVIDIA GPU request path /// (`--gpus all`) otherwise. #[arg(long)] @@ -1163,7 +1163,7 @@ enum SandboxCommands { policy: Option, /// Forward a local port to the sandbox before the initial command or shell starts. - /// Accepts [bind_address:]port (e.g. 8080, 0.0.0.0:8080). Keeps the sandbox alive. + /// Accepts [`bind_address`:]port (e.g. 8080, 0.0.0.0:8080). Keeps the sandbox alive. #[arg(long, conflicts_with = "no_keep")] forward: Option, @@ -1472,11 +1472,11 @@ enum PolicyCommands { #[arg(long = "remove-endpoint")] remove_endpoints: Vec, - /// Add a REST allow rule: host:port:METHOD:path_glob. + /// Add a REST allow rule: `host:port:METHOD:path_glob`. #[arg(long = "add-allow")] add_allow: Vec, - /// Add a REST deny rule: host:port:METHOD:path_glob. + /// Add a REST deny rule: `host:port:METHOD:path_glob`. #[arg(long = "add-deny")] add_deny: Vec, @@ -1556,7 +1556,7 @@ enum PolicyCommands { /// Prove properties of a sandbox policy — or find counterexamples. #[command(help_template = LEAF_HELP_TEMPLATE, next_help_heading = "FLAGS")] Prove { - /// Path to OpenShell sandbox policy YAML. + /// Path to `OpenShell` sandbox policy YAML. #[arg(long, value_hint = ValueHint::FilePath)] policy: String, @@ -1646,7 +1646,7 @@ enum ForwardCommands { /// Start forwarding a local port to a sandbox. #[command(help_template = LEAF_HELP_TEMPLATE, next_help_heading = "FLAGS")] Start { - /// Port to forward: [bind_address:]port (e.g. 8080, 0.0.0.0:8080). + /// Port to forward: [`bind_address`:]port (e.g. 8080, 0.0.0.0:8080). port: String, /// Sandbox name (defaults to last-used sandbox). 
@@ -1675,6 +1675,7 @@ enum ForwardCommands { } #[tokio::main] +#[allow(clippy::large_stack_frames)] // CLI dispatch holds many futures; OK at top level. async fn main() -> Result<()> { // Install the rustls crypto provider before completion runs — completers may // establish TLS connections to the gateway. @@ -1744,7 +1745,7 @@ async fn main() -> Result<()> { } else { vec![] }; - run::gateway_admin_deploy( + Box::pin(run::gateway_admin_deploy( &name, remote.as_deref(), ssh_key.as_deref(), @@ -1756,7 +1757,7 @@ async fn main() -> Result<()> { registry_username.as_deref(), registry_token.as_deref(), gpu, - ) + )) .await?; } GatewayCommands::Stop { @@ -1891,16 +1892,15 @@ async fn main() -> Result<()> { ForwardCommands::Stop { port, name } => { let name = match name { Some(n) => n, - None => match run::find_forward_by_port(port)? { - Some(n) => { + None => { + if let Some(n) = run::find_forward_by_port(port)? { eprintln!("→ Found forward on sandbox '{n}'"); n - } - None => { + } else { eprintln!("{} No active forward found for port {port}", "!".yellow(),); return Ok(()); } - }, + } }; if run::stop_forward(&name, port)? { eprintln!( @@ -3280,7 +3280,7 @@ mod tests { }); } - /// Verify the flag names the TUI uses to build its ProxyCommand are + /// Verify the flag names the TUI uses to build its `ProxyCommand` are /// accepted by the `SshProxy` subcommand and land in the right fields. /// This catches drift when CLI flags are renamed or restructured. #[test] diff --git a/crates/openshell-cli/src/policy_update.rs b/crates/openshell-cli/src/policy_update.rs index 9f053f73b..f51edfff6 100644 --- a/crates/openshell-cli/src/policy_update.rs +++ b/crates/openshell-cli/src/policy_update.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use miette::{Result, miette}; use openshell_core::proto::policy_merge_operation; @@ -13,12 +13,12 @@ use openshell_core::proto::{ use openshell_policy::{PolicyMergeOp, generated_rule_name}; #[derive(Debug, Clone)] -pub(crate) struct PolicyUpdatePlan { +pub struct PolicyUpdatePlan { pub merge_operations: Vec, pub preview_operations: Vec, } -pub(crate) fn build_policy_update_plan( +pub fn build_policy_update_plan( add_endpoints: &[String], remove_endpoints: &[String], add_deny: &[String], @@ -51,8 +51,10 @@ pub(crate) fn build_policy_update_plan( let target_rule_name = rule_name .map(str::trim) .filter(|name| !name.is_empty()) - .map(ToString::to_string) - .unwrap_or_else(|| generated_rule_name(&endpoint.host, endpoint.port)); + .map_or_else( + || generated_rule_name(&endpoint.host, endpoint.port), + ToString::to_string, + ); let rule = NetworkPolicyRule { name: target_rule_name.clone(), endpoints: vec![endpoint.clone()], @@ -165,7 +167,7 @@ fn group_allow_rules(specs: &[String]) -> Result Result String { const GB: u64 = 1024 * MB; if bytes >= GB { - format!("{:.1} GB", bytes as f64 / GB as f64) + // GB-scale precision loss is acceptable for a human-readable label. 
+ #[allow(clippy::cast_precision_loss)] + let gb = bytes as f64 / GB as f64; + format!("{gb:.1} GB") } else if bytes >= MB { format!("{} MB", bytes / MB) } else if bytes >= KB { format!("{} KB", bytes / KB) } else { - format!("{} B", bytes) + format!("{bytes} B") } } @@ -623,11 +626,10 @@ impl GatewayDeployLogPanel { } fn update_spinner_message(&self) { - let msg = if let Some(detail) = &self.progress { - format!("{} ({})", self.status, detail.dimmed()) - } else { - self.status.clone() - }; + let msg = self.progress.as_ref().map_or_else( + || self.status.clone(), + |detail| format!("{} ({})", self.status, detail.dimmed()), + ); self.spinner.set_message(msg); } @@ -881,13 +883,11 @@ fn plaintext_gateway_metadata( remote: Option<&str>, local: bool, ) -> GatewayMetadata { - let (remote_host, resolved_host) = if let Some(dest) = remote { + let (remote_host, resolved_host) = remote.map_or((None, None), |dest| { let ssh_host = extract_host_from_ssh_destination(dest); let resolved = resolve_ssh_hostname(&ssh_host); (Some(dest.to_string()), Some(resolved)) - } else { - (None, None) - }; + }); GatewayMetadata { name: name.to_string(), @@ -1084,13 +1084,11 @@ pub async fn gateway_add( openshell_bootstrap::extract_and_store_pki(name, remote_opts.as_ref(), endpoint_port) .await?; - let (remote_host, resolved_host) = if let Some(dest) = remote { + let (remote_host, resolved_host) = remote.map_or((None, None), |dest| { let ssh_host = extract_host_from_ssh_destination(dest); let resolved = resolve_ssh_hostname(&ssh_host); (Some(dest.to_string()), Some(resolved)) - } else { - (None, None) - }; + }); let metadata = GatewayMetadata { name: name.to_string(), @@ -1424,6 +1422,7 @@ fn print_failure_diagnosis(diagnosis: &openshell_bootstrap::errors::GatewayFailu } /// Provision or start a gateway (local or remote). 
+#[allow(clippy::too_many_arguments)] // user-facing CLI command pub async fn gateway_admin_deploy( name: &str, remote: Option<&str>, @@ -1450,18 +1449,16 @@ pub async fn gateway_admin_deploy( }); // If the gateway is already running and we're not recreating, short-circuit. - if !recreate { - if let Some(existing) = + if !recreate + && let Some(existing) = openshell_bootstrap::check_existing_deployment(name, remote_opts.as_ref()).await? - { - if existing.container_running { - eprintln!( - "{} Gateway '{name}' is already running.", - "✓".green().bold() - ); - return Ok(()); - } - } + && existing.container_running + { + eprintln!( + "{} Gateway '{name}' is already running.", + "✓".green().bold() + ); + return Ok(()); } // When resuming an existing gateway (not recreating), prefer the port @@ -1469,10 +1466,10 @@ pub async fn gateway_admin_deploy( // may have originally bootstrapped on a non-default port (e.g. `--port // 8082`) or with `--gateway-host host.docker.internal`, and a bare // `gateway start` without those flags should honour the original values. - let stored_metadata = if !recreate { - openshell_bootstrap::load_gateway_metadata(name).ok() - } else { + let stored_metadata = if recreate { None + } else { + openshell_bootstrap::load_gateway_metadata(name).ok() }; let effective_port = stored_metadata .as_ref() @@ -1688,6 +1685,7 @@ pub fn gateway_admin_info(name: &str) -> Result<()> { /// /// Connects to the Docker daemon (local or remote via SSH) and retrieves /// logs from the `openshell-cluster-{name}` container. +#[allow(clippy::future_not_send)] // Holds stdout lock; CLI command, never sent across threads. 
pub async fn doctor_logs( name: &str, lines: Option, @@ -1696,24 +1694,29 @@ pub async fn doctor_logs( ssh_key: Option<&str>, ) -> Result<()> { // Build remote options: explicit --remote flag, or auto-resolve from metadata - let remote_opts = if let Some(dest) = remote { - let mut opts = RemoteOptions::new(dest); - if let Some(key) = ssh_key { - opts = opts.with_ssh_key(key); - } - Some(opts) - } else if let Some(metadata) = get_gateway_metadata(name) - && metadata.is_remote - && let Some(ref host) = metadata.remote_host - { - let mut opts = RemoteOptions::new(host.clone()); - if let Some(key) = ssh_key { - opts = opts.with_ssh_key(key); - } - Some(opts) - } else { - None - }; + let remote_opts = remote.map_or_else( + || { + if let Some(metadata) = get_gateway_metadata(name) + && metadata.is_remote + && let Some(ref host) = metadata.remote_host + { + let mut opts = RemoteOptions::new(host.clone()); + if let Some(key) = ssh_key { + opts = opts.with_ssh_key(key); + } + Some(opts) + } else { + None + } + }, + |dest| { + let mut opts = RemoteOptions::new(dest); + if let Some(key) = ssh_key { + opts = opts.with_ssh_key(key); + } + Some(opts) + }, + ); let stdout = std::io::stdout().lock(); openshell_bootstrap::gateway_container_logs(remote_opts.as_ref(), name, lines, tail, stdout) @@ -1744,15 +1747,18 @@ pub fn doctor_exec( }; // Resolve remote destination: explicit --remote flag, or auto-resolve from metadata - let remote_host = if let Some(dest) = remote { - Some(dest.to_string()) - } else if let Some(metadata) = get_gateway_metadata(name) - && metadata.is_remote - { - metadata.remote_host.clone() - } else { - None - }; + let remote_host = remote.map_or_else( + || { + if let Some(metadata) = get_gateway_metadata(name) + && metadata.is_remote + { + metadata.remote_host + } else { + None + } + }, + |dest| Some(dest.to_string()), + ); let mut cmd = if let Some(ref host) = remote_host { validate_ssh_host(host)?; @@ -1828,6 +1834,7 @@ pub fn doctor_llm() -> Result<()> { 
/// /// Checks Docker connectivity and reports the result. Returns exit code 0 /// if all checks pass, 1 otherwise. +#[allow(clippy::future_not_send)] // Holds stdout lock; CLI command, never sent across threads. pub async fn doctor_check() -> Result<()> { use std::io::Write; let mut stdout = std::io::stdout().lock(); @@ -1848,7 +1855,7 @@ pub async fn doctor_check() -> Result<()> { match std::env::var("DOCKER_HOST") { Ok(val) => writeln!(stdout, "{val}").into_diagnostic()?, Err(_) => writeln!(stdout, "(not set, using default socket)").into_diagnostic()?, - }; + } writeln!(stdout, "\nAll checks passed.").into_diagnostic()?; Ok(()) @@ -1943,11 +1950,15 @@ pub async fn sandbox_create_with_bootstrap( )); } let requested_gpu = gpu || from.is_some_and(source_requests_gpu); - let (tls, server, gateway_name) = - crate::bootstrap::run_bootstrap(remote, ssh_key, requested_gpu).await?; + let (tls, server, gateway_name) = Box::pin(crate::bootstrap::run_bootstrap( + remote, + ssh_key, + requested_gpu, + )) + .await?; // Disable bootstrap inside sandbox_create so that a transient connection // failure right after deploy does not trigger a second bootstrap attempt. - sandbox_create( + Box::pin(sandbox_create( &server, name, from, @@ -1966,9 +1977,9 @@ pub async fn sandbox_create_with_bootstrap( tty_override, Some(false), auto_providers_override, - &std::collections::HashMap::new(), + &HashMap::new(), &tls, - ) + )) .await } @@ -2003,7 +2014,7 @@ async fn finalize_sandbox_create_session( } /// Create a sandbox with default settings. 
-#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_arguments, clippy::implicit_hasher)] // user-facing CLI command; default hasher is fine pub async fn sandbox_create( server: &str, name: Option<&str>, @@ -2023,7 +2034,7 @@ pub async fn sandbox_create( tty_override: Option, bootstrap_override: Option, auto_providers_override: Option, - labels: &std::collections::HashMap, + labels: &HashMap, tls: &TlsOptions, ) -> Result<()> { if editor.is_some() && !command.is_empty() { @@ -2072,8 +2083,12 @@ pub async fn sandbox_create( return Err(err); } let requested_gpu = gpu || from.is_some_and(source_requests_gpu); - let (new_tls, new_server, _) = - crate::bootstrap::run_bootstrap(remote, ssh_key, requested_gpu).await?; + let (new_tls, new_server, _) = Box::pin(crate::bootstrap::run_bootstrap( + remote, + ssh_key, + requested_gpu, + )) + .await?; let c = grpc_client(&new_server, &new_tls) .await .wrap_err("bootstrap succeeded but failed to connect to gateway")?; @@ -2336,7 +2351,7 @@ pub async fn sandbox_create( let label = if size_label.is_empty() { "Image pulled".to_string() } else { - format!("Image pulled ({})", size_label) + format!("Image pulled ({size_label})") }; if let Some(d) = display.as_mut() { d.complete_step_with_label( @@ -2375,10 +2390,10 @@ pub async fn sandbox_create( eprintln!(" {} {} {}", ts.dimmed(), "WARN".yellow(), w.message); } } - Some(openshell_core::proto::sandbox_stream_event::Payload::DraftPolicyUpdate(_)) => { + Some(openshell_core::proto::sandbox_stream_event::Payload::DraftPolicyUpdate(_)) + | None => { // Draft policy updates are handled in the draft panel, not during provisioning. } - None => {} } } @@ -2625,14 +2640,10 @@ fn resolve_from(value: &str) -> Result { } fn source_requests_gpu(source: &str) -> bool { - if let Ok(resolved) = resolve_from(source) { - match resolved { - ResolvedSource::Image(image) => image_requests_gpu(&image), - ResolvedSource::Dockerfile { .. 
} => false, - } - } else { - false - } + resolve_from(source).is_ok_and(|resolved| match resolved { + ResolvedSource::Image(image) => image_requests_gpu(&image), + ResolvedSource::Dockerfile { .. } => false, + }) } fn image_requests_gpu(image: &str) -> bool { @@ -2816,14 +2827,14 @@ pub async fn sandbox_get( println!(" {} {}", "Phase:".dimmed(), phase_name(sandbox.phase)); // Display labels if present - if let Some(metadata) = &sandbox.metadata { - if !metadata.labels.is_empty() { - println!(" {} ", "Labels:".dimmed()); - let mut labels: Vec<_> = metadata.labels.iter().collect(); - labels.sort_by_key(|(k, _)| *k); - for (key, value) in labels { - println!(" {}: {}", key, value); - } + if let Some(metadata) = &sandbox.metadata + && !metadata.labels.is_empty() + { + println!(" {} ", "Labels:".dimmed()); + let mut labels: Vec<_> = metadata.labels.iter().collect(); + labels.sort_by_key(|(k, _)| *k); + for (key, value) in labels { + println!(" {key}: {value}"); } } @@ -2903,7 +2914,9 @@ pub async fn sandbox_exec_grpc( // Read stdin if piped (not a TTY), using spawn_blocking to avoid blocking // the async runtime. Cap the read at MAX_STDIN_PAYLOAD + 1 so we never // buffer more than the limit into memory. - let stdin_payload = if !std::io::stdin().is_terminal() { + let stdin_payload = if std::io::stdin().is_terminal() { + Vec::new() + } else { tokio::task::spawn_blocking(|| { let limit = (MAX_STDIN_PAYLOAD + 1) as u64; let mut buf = Vec::new(); @@ -2921,8 +2934,6 @@ pub async fn sandbox_exec_grpc( }) .await .into_diagnostic()?? // first ? unwraps JoinError, second ? unwraps Result - } else { - Vec::new() }; // Resolve TTY mode: explicit --tty / --no-tty wins, otherwise auto-detect. 
@@ -3067,7 +3078,7 @@ pub async fn sandbox_list( if names_only { for sandbox in sandboxes { - println!("{}", sandbox.object_name().to_string()); + println!("{}", sandbox.object_name()); } return Ok(()); } @@ -3099,13 +3110,7 @@ pub async fn sandbox_list( Ok(SandboxPhase::Deleting) => phase.dimmed().to_string(), _ => phase.to_string(), }; - let created = format_epoch_ms( - sandbox - .metadata - .as_ref() - .map(|m| m.created_at_ms) - .unwrap_or(0), - ); + let created = format_epoch_ms(sandbox.metadata.as_ref().map_or(0, |m| m.created_at_ms)); println!( "{: Result< println!("{}", "Provider:".cyan().bold()); println!(); - println!(" {} {}", "Id:".dimmed(), provider.object_id().to_string()); - println!( - " {} {}", - "Name:".dimmed(), - provider.object_name().to_string() - ); + println!(" {} {}", "Id:".dimmed(), provider.object_id()); + println!(" {} {}", "Name:".dimmed(), provider.object_name()); println!(" {} {}", "Type:".dimmed(), provider.r#type); println!( " {} {}", @@ -3668,7 +3669,7 @@ pub async fn provider_list( if names_only { for provider in providers { - println!("{}", provider.object_name().to_string()); + println!("{}", provider.object_name()); } return Ok(()); } @@ -3764,7 +3765,7 @@ pub async fn provider_update( id: String::new(), name: name.to_string(), created_at_ms: 0, - labels: std::collections::HashMap::new(), + labels: HashMap::new(), }), r#type: String::new(), credentials: credential_map, @@ -3782,7 +3783,7 @@ pub async fn provider_update( println!( "{} Updated provider {}", "✓".green().bold(), - provider.object_name().to_string() + provider.object_name() ); Ok(()) } @@ -4374,9 +4375,9 @@ pub async fn sandbox_settings_get( "sandbox" }; - println!("Sandbox: {}", name); + println!("Sandbox: {name}"); println!("Config Rev: {}", response.config_revision); - println!("Policy Source: {}", policy_source); + println!("Policy Source: {policy_source}"); println!("Policy Hash: {}", response.policy_hash); if response.settings.is_empty() { @@ -5121,6 +5122,7 
@@ fn print_policy_revision_table(revisions: &[openshell_core::proto::SandboxPolicy // Sandbox logs command // --------------------------------------------------------------------------- +#[allow(clippy::too_many_arguments)] // user-facing CLI command pub async fn sandbox_logs( server: &str, name: &str, diff --git a/crates/openshell-cli/src/ssh.rs b/crates/openshell-cli/src/ssh.rs index e6273413c..19dd78389 100644 --- a/crates/openshell-cli/src/ssh.rs +++ b/crates/openshell-cli/src/ssh.rs @@ -44,7 +44,7 @@ impl Editor { } } - fn remote_target(self, host_alias: &str) -> String { + fn remote_target(host_alias: &str) -> String { format!("ssh-remote+{host_alias}") } @@ -357,12 +357,13 @@ pub async fn sandbox_forward( command.status().await.into_diagnostic()? } else { let mut child = command.spawn().into_diagnostic()?; - match tokio::time::timeout(FOREGROUND_FORWARD_STARTUP_GRACE_PERIOD, child.wait()).await { - Ok(status) => status.into_diagnostic()?, - Err(_) => { - eprintln!("{}", foreground_forward_started_message(name, spec)); - child.wait().await.into_diagnostic()? - } + if let Ok(status) = + tokio::time::timeout(FOREGROUND_FORWARD_STARTUP_GRACE_PERIOD, child.wait()).await + { + status.into_diagnostic()? + } else { + eprintln!("{}", foreground_forward_started_message(name, spec)); + child.wait().await.into_diagnostic()? } }; @@ -556,10 +557,7 @@ async fn ssh_tar_upload( // When no explicit destination is given, use the unescaped `$HOME` shell // variable so the remote shell resolves it at runtime. - let escaped_dest = match dest_dir { - Some(d) => shell_escape(d), - None => "$HOME".to_string(), - }; + let escaped_dest = dest_dir.map_or_else(|| "$HOME".to_string(), shell_escape); let mut ssh = ssh_base_command(&session.proxy_command); ssh.arg("-T") @@ -598,7 +596,7 @@ async fn ssh_tar_upload( Ok(()) } -/// Split a sandbox path into (parent_directory, basename). +/// Split a sandbox path into (`parent_directory`, basename). 
/// /// Examples: /// `"/sandbox/.bashrc"` -> `("/sandbox", ".bashrc")` @@ -666,22 +664,23 @@ pub async fn sandbox_sync_up( // passed "/sandbox"), fall through to directory semantics instead. The // sandbox user cannot write to "/" and the intent is almost certainly // "put the file inside /sandbox", not "create a file named sandbox in /". - if let Some(path) = sandbox_path { - if local_path.is_file() && !path.ends_with('/') { - let (parent, target_name) = split_sandbox_path(path); - if parent != "/" { - return ssh_tar_upload( - server, - name, - Some(parent), - UploadSource::SinglePath { - local_path: local_path.to_path_buf(), - tar_name: target_name.into(), - }, - tls, - ) - .await; - } + if let Some(path) = sandbox_path + && local_path.is_file() + && !path.ends_with('/') + { + let (parent, target_name) = split_sandbox_path(path); + if parent != "/" { + return ssh_tar_upload( + server, + name, + Some(parent), + UploadSource::SinglePath { + local_path: local_path.to_path_buf(), + tar_name: target_name.into(), + }, + tls, + ) + .await; } } @@ -720,8 +719,7 @@ pub async fn sandbox_sync_up( fn directory_upload_prefix(local_path: &Path) -> std::ffi::OsString { local_path .file_name() - .map(|n| n.to_os_string()) - .unwrap_or_else(|| ".".into()) + .map_or_else(|| ".".into(), std::ffi::OsStr::to_os_string) } fn file_list_archive_prefix(local_path: &Path) -> Option { @@ -824,6 +822,13 @@ pub async fn sandbox_ssh_proxy( token: &str, tls: &TlsOptions, ) -> Result<()> { + // The gateway returns 412 (Precondition Failed) when the sandbox pod + // exists but hasn't reached Ready phase yet. This is a transient state + // after sandbox allocation — retry with backoff instead of failing + // immediately. 
+ const MAX_CONNECT_WAIT: Duration = Duration::from_secs(60); + const INITIAL_BACKOFF: Duration = Duration::from_secs(1); + let url: url::Url = gateway_url .parse() .into_diagnostic() @@ -842,13 +847,6 @@ pub async fn sandbox_ssh_proxy( "CONNECT {connect_path} HTTP/1.1\r\nHost: {gateway_host}\r\nX-Sandbox-Id: {sandbox_id}\r\nX-Sandbox-Token: {token}\r\n\r\n" ); - // The gateway returns 412 (Precondition Failed) when the sandbox pod - // exists but hasn't reached Ready phase yet. This is a transient state - // after sandbox allocation — retry with backoff instead of failing - // immediately. - const MAX_CONNECT_WAIT: Duration = Duration::from_secs(60); - const INITIAL_BACKOFF: Duration = Duration::from_secs(1); - let start = std::time::Instant::now(); let mut backoff = INITIAL_BACKOFF; let mut buf_stream; @@ -1038,8 +1036,7 @@ fn upsert_host_block(contents: &str, alias: &str, block: &str) -> String { .enumerate() .skip(start + 1) .find(|(_, line)| line.trim_start().starts_with("Host ")) - .map(|(idx, _)| idx) - .unwrap_or(lines.len()); + .map_or(lines.len(), |(idx, _)| idx); out.extend_from_slice(&lines[..start]); if !out.is_empty() && !out.last().is_some_and(|line| line.is_empty()) { @@ -1088,7 +1085,7 @@ fn launch_editor(editor: Editor, host_alias: &str) -> Result<()> { launch_editor_command( editor.binary(), editor.label(), - &editor.remote_target(host_alias), + &Editor::remote_target(host_alias), ) } @@ -1244,6 +1241,7 @@ mod tests { } #[test] + #[allow(unsafe_code)] // Test-only: env vars require unsafe in Rust 2024. 
fn install_ssh_config_adds_include_once_and_updates_managed_file() { let _guard = TEST_ENV_LOCK .lock() diff --git a/crates/openshell-cli/tests/ensure_providers_integration.rs b/crates/openshell-cli/tests/ensure_providers_integration.rs index 34485377d..a5a485735 100644 --- a/crates/openshell-cli/tests/ensure_providers_integration.rs +++ b/crates/openshell-cli/tests/ensure_providers_integration.rs @@ -210,10 +210,10 @@ impl OpenShell for TestOpenShell { if providers.contains_key(&provider_name) { return Err(Status::already_exists("provider already exists")); } - if provider.object_id().is_empty() { - if let Some(metadata) = &mut provider.metadata { - metadata.id = format!("id-{provider_name}"); - } + if provider.object_id().is_empty() + && let Some(metadata) = &mut provider.metadata + { + metadata.id = format!("id-{provider_name}"); } providers.insert(provider_name, provider.clone()); Ok(Response::new(ProviderResponse { @@ -266,9 +266,9 @@ impl OpenShell for TestOpenShell { .cloned() .ok_or_else(|| Status::not_found("provider not found"))?; // Merge semantics: empty map = no change, empty value = delete key. 
- let merge = |mut base: std::collections::HashMap, - incoming: std::collections::HashMap| - -> std::collections::HashMap { + let merge = |mut base: HashMap, + incoming: HashMap| + -> HashMap { if incoming.is_empty() { return base; } @@ -286,7 +286,7 @@ impl OpenShell for TestOpenShell { let updated = Provider { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: existing_metadata.id, - name: provider_metadata.name.clone(), + name: provider_metadata.name, created_at_ms: existing_metadata.created_at_ms, labels: existing_metadata.labels, }), @@ -449,15 +449,14 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = + tokio_stream::wrappers::ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } diff --git a/crates/openshell-cli/tests/mtls_integration.rs b/crates/openshell-cli/tests/mtls_integration.rs index e78c91578..77d33f7b0 100644 --- a/crates/openshell-cli/tests/mtls_integration.rs +++ b/crates/openshell-cli/tests/mtls_integration.rs @@ -336,15 +336,14 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = + tokio_stream::wrappers::ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } diff --git a/crates/openshell-cli/tests/provider_commands_integration.rs b/crates/openshell-cli/tests/provider_commands_integration.rs index 9bda696c1..d151e5a1c 100644 --- 
a/crates/openshell-cli/tests/provider_commands_integration.rs +++ b/crates/openshell-cli/tests/provider_commands_integration.rs @@ -160,10 +160,10 @@ impl OpenShell for TestOpenShell { if providers.contains_key(&provider_name) { return Err(Status::already_exists("provider already exists")); } - if provider.object_id().is_empty() { - if let Some(metadata) = &mut provider.metadata { - metadata.id = format!("id-{provider_name}"); - } + if provider.object_id().is_empty() + && let Some(metadata) = &mut provider.metadata + { + metadata.id = format!("id-{provider_name}"); } providers.insert(provider_name, provider.clone()); Ok(Response::new(ProviderResponse { @@ -216,9 +216,9 @@ impl OpenShell for TestOpenShell { .cloned() .ok_or_else(|| Status::not_found("provider not found"))?; // Merge semantics: empty map = no change, empty value = delete key. - let merge = |mut base: std::collections::HashMap, - incoming: std::collections::HashMap| - -> std::collections::HashMap { + let merge = |mut base: HashMap, + incoming: HashMap| + -> HashMap { if incoming.is_empty() { return base; } @@ -236,7 +236,7 @@ impl OpenShell for TestOpenShell { let updated = Provider { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: existing_metadata.id, - name: provider_metadata.name.clone(), + name: provider_metadata.name, created_at_ms: existing_metadata.created_at_ms, labels: existing_metadata.labels, }), @@ -399,15 +399,14 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = + tokio_stream::wrappers::ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } diff --git a/crates/openshell-cli/tests/sandbox_create_lifecycle_integration.rs 
b/crates/openshell-cli/tests/sandbox_create_lifecycle_integration.rs index 6b78dab9a..e69d06f4f 100644 --- a/crates/openshell-cli/tests/sandbox_create_lifecycle_integration.rs +++ b/crates/openshell-cli/tests/sandbox_create_lifecycle_integration.rs @@ -423,15 +423,14 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = + tokio_stream::wrappers::ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } diff --git a/crates/openshell-cli/tests/sandbox_name_fallback_integration.rs b/crates/openshell-cli/tests/sandbox_name_fallback_integration.rs index 5c463de9e..7d6a9536a 100644 --- a/crates/openshell-cli/tests/sandbox_name_fallback_integration.rs +++ b/crates/openshell-cli/tests/sandbox_name_fallback_integration.rs @@ -360,15 +360,14 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = + tokio_stream::wrappers::ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } diff --git a/crates/openshell-core/src/config.rs b/crates/openshell-core/src/config.rs index 40a87fc41..f5d844fb8 100644 --- a/crates/openshell-core/src/config.rs +++ b/crates/openshell-core/src/config.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! Configuration management for OpenShell components. +//! 
Configuration management for `OpenShell` components. use serde::{Deserialize, Serialize}; use std::fmt; @@ -138,7 +138,7 @@ pub struct Config { #[serde(default)] pub sandbox_image_pull_policy: String, - /// gRPC endpoint for sandboxes to connect back to OpenShell. + /// gRPC endpoint for sandboxes to connect back to `OpenShell`. /// Used by sandbox pods to fetch their policy at startup. #[serde(default)] pub grpc_endpoint: String, diff --git a/crates/openshell-core/src/error.rs b/crates/openshell-core/src/error.rs index 9d1650135..7c33c9eaf 100644 --- a/crates/openshell-core/src/error.rs +++ b/crates/openshell-core/src/error.rs @@ -1,15 +1,15 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! Common error types for OpenShell. +//! Common error types for `OpenShell`. use miette::Diagnostic; use thiserror::Error; -/// Result type alias using OpenShell's error type. +/// Result type alias using `OpenShell`'s error type. pub type Result = std::result::Result; -/// OpenShell error type. +/// `OpenShell` error type. #[derive(Debug, Error, Diagnostic)] pub enum Error { /// Configuration error. diff --git a/crates/openshell-core/src/forward.rs b/crates/openshell-core/src/forward.rs index e6c7d2f7c..27f3275c0 100644 --- a/crates/openshell-core/src/forward.rs +++ b/crates/openshell-core/src/forward.rs @@ -135,18 +135,17 @@ pub fn pid_matches_forward(pid: u32, port: u16, sandbox_id: Option<&str>) -> boo /// match is expected. 
pub fn find_forward_by_port(port: u16) -> Result> { let dir = forward_pid_dir()?; - let entries = match std::fs::read_dir(&dir) { - Ok(e) => e, - Err(_) => return Ok(None), + let Ok(entries) = std::fs::read_dir(&dir) else { + return Ok(None); }; let suffix = format!("-{port}.pid"); for entry in entries.flatten() { let file_name = entry.file_name(); let file_name = file_name.to_string_lossy(); - if let Some(name) = file_name.strip_suffix(&suffix) { - if !name.is_empty() { - return Ok(Some(name.to_string())); - } + if let Some(name) = file_name.strip_suffix(&suffix) + && !name.is_empty() + { + return Ok(Some(name.to_string())); } } Ok(None) @@ -1049,9 +1048,8 @@ mod tests { // `python3 -m http.server` which listens on [::] by default. The // IPv4-only TcpListener::bind("127.0.0.1", port) might succeed, but // lsof should detect the listener and the check should still fail. - let listener = match TcpListener::bind("[::]:0") { - Ok(l) => l, - Err(_) => return, // IPv6 not available, skip + let Ok(listener) = TcpListener::bind("[::]:0") else { + return; // IPv6 not available, skip }; let port = listener.local_addr().unwrap().port(); diff --git a/crates/openshell-core/src/inference.rs b/crates/openshell-core/src/inference.rs index d2581f7eb..0360cae5c 100644 --- a/crates/openshell-core/src/inference.rs +++ b/crates/openshell-core/src/inference.rs @@ -124,8 +124,9 @@ pub fn auth_for_provider_type(provider_type: &str) -> (AuthHeader, Vec<(String, pub fn route_headers_for_provider_type( provider_type: &str, ) -> (AuthHeader, Vec<(String, String)>, Vec) { - match profile_for(provider_type) { - Some(profile) => { + profile_for(provider_type).map_or_else( + || (AuthHeader::Bearer, Vec::new(), Vec::new()), + |profile| { let headers = profile .default_headers .iter() @@ -137,9 +138,8 @@ pub fn route_headers_for_provider_type( .map(|name| (*name).to_string()) .collect(); (profile.auth.clone(), headers, passthrough_headers) - } - None => (AuthHeader::Bearer, Vec::new(), 
Vec::new()), - } + }, + ) } // --------------------------------------------------------------------------- diff --git a/crates/openshell-core/src/lib.rs b/crates/openshell-core/src/lib.rs index dff6d82ec..8cdcd5bab 100644 --- a/crates/openshell-core/src/lib.rs +++ b/crates/openshell-core/src/lib.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! OpenShell Core - shared library for OpenShell components. +//! `OpenShell` Core - shared library for `OpenShell` components. //! //! This crate provides: //! - Protocol buffer definitions and generated code diff --git a/crates/openshell-core/src/metadata.rs b/crates/openshell-core/src/metadata.rs index 30f0999fb..90566dcfd 100644 --- a/crates/openshell-core/src/metadata.rs +++ b/crates/openshell-core/src/metadata.rs @@ -3,7 +3,7 @@ //! Object metadata accessors for Kubernetes-style resources. //! -//! These traits provide uniform access to ObjectMeta fields across all resource types. +//! These traits provide uniform access to `ObjectMeta` fields across all resource types. 
use crate::proto::{InferenceRoute, ObjectForTest, Provider, Sandbox, SshSession}; use std::collections::HashMap; @@ -26,16 +26,13 @@ pub trait ObjectLabels { // Implementations for Sandbox impl ObjectId for Sandbox { fn object_id(&self) -> &str { - self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.id.as_str()) } } impl ObjectName for Sandbox { fn object_name(&self) -> &str { - self.metadata - .as_ref() - .map(|m| m.name.as_str()) - .unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.name.as_str()) } } @@ -48,16 +45,13 @@ impl ObjectLabels for Sandbox { // Implementations for Provider impl ObjectId for Provider { fn object_id(&self) -> &str { - self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.id.as_str()) } } impl ObjectName for Provider { fn object_name(&self) -> &str { - self.metadata - .as_ref() - .map(|m| m.name.as_str()) - .unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.name.as_str()) } } @@ -70,16 +64,13 @@ impl ObjectLabels for Provider { // Implementations for SshSession impl ObjectId for SshSession { fn object_id(&self) -> &str { - self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.id.as_str()) } } impl ObjectName for SshSession { fn object_name(&self) -> &str { - self.metadata - .as_ref() - .map(|m| m.name.as_str()) - .unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.name.as_str()) } } @@ -92,16 +83,13 @@ impl ObjectLabels for SshSession { // Implementations for InferenceRoute impl ObjectId for InferenceRoute { fn object_id(&self) -> &str { - self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.id.as_str()) } } impl ObjectName for InferenceRoute { fn object_name(&self) -> &str { - self.metadata - .as_ref() - .map(|m| m.name.as_str()) - .unwrap_or("") + self.metadata.as_ref().map_or("", |m| m.name.as_str()) } } diff --git 
a/crates/openshell-core/src/net.rs b/crates/openshell-core/src/net.rs index a73cb38dc..5dca4feb6 100644 --- a/crates/openshell-core/src/net.rs +++ b/crates/openshell-core/src/net.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! Network IP classification utilities shared across OpenShell crates. +//! Network IP classification utilities shared across `OpenShell` crates. //! //! These helpers enforce the always-blocked IP invariant (loopback, link-local, //! unspecified) and the broader internal-IP classification (adds RFC 1918 and @@ -133,7 +133,7 @@ pub fn is_always_blocked_net(net: ipnet::IpNet) -> bool { /// when `allowed_ips` should be populated in proposals. pub fn is_internal_ip(ip: IpAddr) -> bool { match ip { - IpAddr::V4(v4) => is_internal_v4(&v4), + IpAddr::V4(v4) => is_internal_v4(v4), IpAddr::V6(v6) => { if v6.is_loopback() || v6.is_unspecified() { return true; @@ -148,7 +148,7 @@ pub fn is_internal_ip(ip: IpAddr) -> bool { } // Check IPv4-mapped IPv6 (::ffff:x.x.x.x) if let Some(v4) = v6.to_ipv4_mapped() { - return is_internal_v4(&v4); + return is_internal_v4(v4); } false } @@ -157,7 +157,7 @@ pub fn is_internal_ip(ip: IpAddr) -> bool { /// IPv4 internal address check covering RFC 1918, CGNAT (RFC 6598), and other /// special-use ranges that should never be reachable from sandbox egress. 
-fn is_internal_v4(v4: &Ipv4Addr) -> bool { +fn is_internal_v4(v4: Ipv4Addr) -> bool { if v4.is_loopback() || v4.is_private() || v4.is_link_local() || v4.is_unspecified() { return true; } diff --git a/crates/openshell-driver-kubernetes/src/driver.rs b/crates/openshell-driver-kubernetes/src/driver.rs index a3f90457c..360a94152 100644 --- a/crates/openshell-driver-kubernetes/src/driver.rs +++ b/crates/openshell-driver-kubernetes/src/driver.rs @@ -411,6 +411,8 @@ impl KubernetesComputeDriver { } } + // Kept `async` to match the gRPC handler signature in `grpc.rs`, which awaits this method. + #[allow(clippy::unused_async)] pub async fn watch_sandboxes(&self) -> Result { let namespace = self.config.namespace.clone(); let sandbox_api = self.watch_api(); @@ -580,7 +582,6 @@ fn sandbox_from_object(namespace: &str, obj: DynamicObject) -> Result Option, sandbox_id: &str, @@ -1503,7 +1504,7 @@ mod tests { .expect("command should be set"); assert_eq!( command[0].as_str().unwrap(), - format!("{}/openshell-sandbox", SUPERVISOR_MOUNT_PATH) + format!("{SUPERVISOR_MOUNT_PATH}/openshell-sandbox") ); // Volume mount should be read-only @@ -1597,13 +1598,12 @@ mod tests { fn gpu_sandbox_uses_template_runtime_class_name_when_set() { let template = SandboxTemplate { platform_config: Some(Struct { - fields: [( + fields: std::iter::once(( "runtime_class_name".to_string(), Value { kind: Some(Kind::StringValue("kata-containers".to_string())), }, - )] - .into_iter() + )) .collect(), }), ..SandboxTemplate::default() @@ -1636,13 +1636,12 @@ mod tests { fn non_gpu_sandbox_uses_template_runtime_class_name_when_set() { let template = SandboxTemplate { platform_config: Some(Struct { - fields: [( + fields: std::iter::once(( "runtime_class_name".to_string(), Value { kind: Some(Kind::StringValue("kata-containers".to_string())), }, - )] - .into_iter() + )) .collect(), }), ..SandboxTemplate::default() @@ -1851,9 +1850,7 @@ mod tests { // Verify we did not inject one (which would cause a duplicate). 
let has_pvc_vol = pod_template["spec"]["volumes"] .as_array() - .map_or(false, |vols| { - vols.iter().any(|v| v["name"] == WORKSPACE_VOLUME_NAME) - }); + .is_some_and(|vols| vols.iter().any(|v| v["name"] == WORKSPACE_VOLUME_NAME)); assert!( !has_pvc_vol, "apply_workspace_persistence must NOT add a PVC volume (the CRD controller does that)" @@ -1933,16 +1930,14 @@ mod tests { pod_template["spec"]["initContainers"].is_null() || pod_template["spec"]["initContainers"] .as_array() - .is_none_or(|a| a.is_empty()), + .is_none_or(Vec::is_empty), "workspace init container must NOT be present when inject_workspace is false" ); // No workspace volume mount on agent let has_workspace_mount = pod_template["spec"]["containers"][0]["volumeMounts"] .as_array() - .map_or(false, |mounts| { - mounts.iter().any(|m| m["name"] == WORKSPACE_VOLUME_NAME) - }); + .is_some_and(|mounts| mounts.iter().any(|m| m["name"] == WORKSPACE_VOLUME_NAME)); assert!( !has_workspace_mount, "workspace mount must NOT be present when inject_workspace is false" diff --git a/crates/openshell-driver-podman/src/client.rs b/crates/openshell-driver-podman/src/client.rs index 1c590db2c..12ea0901f 100644 --- a/crates/openshell-driver-podman/src/client.rs +++ b/crates/openshell-driver-podman/src/client.rs @@ -54,7 +54,7 @@ const MAX_NAME_LEN: usize = 255; /// Valid names start with an alphanumeric character and contain only /// alphanumerics, dots, underscores, and hyphens — matching Podman's /// own naming rules. Names longer than [`MAX_NAME_LEN`] are rejected. 
-pub(crate) fn validate_name(name: &str) -> Result<(), PodmanApiError> { +pub fn validate_name(name: &str) -> Result<(), PodmanApiError> { // Regex-equivalent: ^[a-zA-Z0-9][a-zA-Z0-9._-]*$ if name.is_empty() { return Err(PodmanApiError::InvalidInput( @@ -92,6 +92,7 @@ pub struct ContainerInspect { pub name: String, pub state: ContainerState, #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub network_settings: NetworkSettings, #[serde(default)] pub config: ContainerConfig, @@ -101,6 +102,7 @@ pub struct ContainerInspect { #[serde(rename_all = "PascalCase")] pub struct ContainerState { pub status: String, + #[allow(dead_code)] // kept for podman API compat pub running: bool, #[serde(default)] pub exit_code: i64, @@ -125,8 +127,10 @@ pub struct HealthState { #[serde(rename_all = "PascalCase")] pub struct NetworkSettings { #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub networks: HashMap, #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub ports: HashMap>>, } @@ -135,6 +139,7 @@ pub struct NetworkSettings { pub struct NetworkInfo { #[serde(rename = "IPAddress")] #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub ip_address: String, } @@ -142,9 +147,11 @@ pub struct NetworkInfo { #[serde(rename_all = "PascalCase")] pub struct PortBinding { #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub host_port: String, #[serde(rename = "HostIp")] #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub host_ip: String, } @@ -166,19 +173,26 @@ pub struct ContainerListEntry { #[serde(default)] pub labels: HashMap, #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub ports: Option>, #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub networks: Option>, #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub exit_code: i64, } #[derive(Debug, Clone, serde::Deserialize)] pub struct 
PortMappingEntry { + #[allow(dead_code)] // kept for podman API compat pub host_port: u16, + #[allow(dead_code)] // kept for podman API compat pub container_port: u16, + #[allow(dead_code)] // kept for podman API compat pub protocol: String, #[serde(default)] + #[allow(dead_code)] // kept for podman API compat pub host_ip: String, } @@ -187,11 +201,13 @@ pub struct PortMappingEntry { #[serde(rename_all = "PascalCase")] pub struct PodmanEvent { #[serde(rename = "Type")] + #[allow(dead_code)] // kept for podman API compat pub event_type: String, pub action: String, #[serde(default)] pub actor: EventActor, #[serde(rename = "timeNano", default)] + #[allow(dead_code)] // kept for podman API compat pub time_nano: i64, } @@ -407,7 +423,7 @@ impl PodmanClient { timeout_secs: u32, ) -> Result<(), PodmanApiError> { validate_name(name)?; - let http_timeout = Duration::from_secs(timeout_secs as u64 + 5); + let http_timeout = Duration::from_secs(u64::from(timeout_secs) + 5); let (status, bytes) = self .request( hyper::Method::POST, @@ -614,17 +630,15 @@ impl PodmanClient { } // The response is NDJSON. Check the last line for an error field. let body = String::from_utf8_lossy(&bytes); - if let Some(last_line) = body.lines().filter(|l| !l.is_empty()).last() { - if let Ok(obj) = serde_json::from_str::(last_line) { - if let Some(err) = obj.get("error").and_then(|v| v.as_str()) { - if !err.is_empty() { - return Err(PodmanApiError::Api { - status: 500, - message: format!("image pull failed: {err}"), - }); - } - } - } + if let Some(last_line) = body.lines().rfind(|l| !l.is_empty()) + && let Ok(obj) = serde_json::from_str::(last_line) + && let Some(err) = obj.get("error").and_then(|v| v.as_str()) + && !err.is_empty() + { + return Err(PodmanApiError::Api { + status: 500, + message: format!("image pull failed: {err}"), + }); } Ok(()) } @@ -730,7 +744,7 @@ impl PodmanClient { // Parse complete newline-delimited JSON lines. 
while let Some(pos) = buffer.iter().position(|&b| b == b'\n') { let line: Vec = buffer.drain(..=pos).collect(); - let trimmed = line.strip_suffix(&[b'\n']).unwrap_or(&line); + let trimmed = line.strip_suffix(b"\n").unwrap_or(&line); if trimmed.is_empty() { continue; } diff --git a/crates/openshell-driver-podman/src/container.rs b/crates/openshell-driver-podman/src/container.rs index d992881fe..cc7bbc519 100644 --- a/crates/openshell-driver-podman/src/container.rs +++ b/crates/openshell-driver-podman/src/container.rs @@ -48,7 +48,7 @@ pub fn secret_name(sandbox_id: &str) -> String { /// Truncate a container ID to 12 characters (standard short form). #[must_use] -pub(crate) fn short_id(id: &str) -> String { +pub fn short_id(id: &str) -> String { id.chars().take(12).collect() } @@ -80,7 +80,7 @@ struct ContainerSpec { healthconfig: HealthConfig, resource_limits: ResourceLimits, /// Env-type secrets: map of `ENV_VAR_NAME → secret_name`. - /// Podman's libpod SpecGenerator uses `secret_env` (a flat map) for + /// Podman's libpod `SpecGenerator` uses `secret_env` (a flat map) for /// environment-variable injection, distinct from `secrets` which only /// handles file-mounted secrets under `/run/secrets/`. secret_env: BTreeMap, @@ -90,17 +90,21 @@ struct ContainerSpec { /// the gateway server running on the host in rootless mode. hostadd: Vec, netns: NetNS, + // Matches libpod's network spec format, which is `{name: {opts}}` where + // empty opts is a unit struct rather than `()`. Keep as a map so JSON + // serialization matches the API exactly. + #[allow(clippy::zero_sized_map_values)] networks: BTreeMap, #[serde(skip_serializing_if = "Option::is_none")] devices: Option>, - /// Extra mounts for the libpod SpecGenerator (e.g. tmpfs entries). + /// Extra mounts for the libpod `SpecGenerator` (e.g. tmpfs entries). mounts: Vec, - /// Port mappings from host to container. Using host_port=0 requests an + /// Port mappings from host to container. 
Using `host_port=0` requests an /// ephemeral port, readable back from the inspect response. portmappings: Vec, } -/// A port mapping entry for the libpod SpecGenerator. +/// A port mapping entry for the libpod `SpecGenerator`. #[derive(Serialize)] struct PortMapping { host_port: u16, @@ -112,12 +116,12 @@ struct PortMapping { /// /// Unlike `volumes` (named Podman volumes) or `image_volumes` (OCI image /// mounts resolved at the libpod layer), these mounts are passed to the -/// libpod SpecGenerator and support arbitrary mount types (e.g. tmpfs). +/// libpod `SpecGenerator` and support arbitrary mount types (e.g. tmpfs). /// Field names must be lowercase to match the libpod JSON schema. #[derive(Serialize)] struct Mount { #[serde(rename = "type")] - mount_type: String, + kind: String, source: String, destination: String, #[serde(skip_serializing_if = "Vec::is_empty")] @@ -331,6 +335,9 @@ pub fn build_container_spec(sandbox: &DriverSandbox, config: &PodmanComputeConfi let devices = build_devices(sandbox); // Network configuration -- always bridge mode. + // Matches libpod's network spec format `{name: {opts}}`; the unit-struct + // value mirrors empty opts in the JSON. + #[allow(clippy::zero_sized_map_values)] let mut networks = BTreeMap::new(); networks.insert(config.network_name.clone(), NetworkAttachment {}); @@ -456,7 +463,7 @@ pub fn build_container_spec(sandbox: &DriverSandbox, config: &PodmanComputeConfi // fails with EPERM. A private tmpfs gives the supervisor its own writable // /run/netns without needing host filesystem access. 
mounts: vec![Mount { - mount_type: "tmpfs".into(), + kind: "tmpfs".into(), source: "tmpfs".into(), destination: "/run/netns".into(), options: vec!["rw".into(), "nosuid".into(), "nodev".into()], diff --git a/crates/openshell-driver-podman/src/driver.rs b/crates/openshell-driver-podman/src/driver.rs index dff95a532..ae9492d74 100644 --- a/crates/openshell-driver-podman/src/driver.rs +++ b/crates/openshell-driver-podman/src/driver.rs @@ -154,8 +154,8 @@ impl PodmanComputeDriver { } /// Report driver capabilities. - pub async fn capabilities(&self) -> Result { - let supports_gpu = self.has_gpu_capacity(); + pub fn capabilities(&self) -> Result { + let supports_gpu = Self::has_gpu_capacity(); Ok(GetCapabilitiesResponse { driver_name: "podman".to_string(), driver_version: openshell_core::VERSION.to_string(), @@ -175,17 +175,17 @@ impl PodmanComputeDriver { /// The Podman system info response doesn't directly list CDI devices in all /// versions. As a heuristic, check if the NVIDIA device node exists (this /// works for both rootful and rootless). - fn has_gpu_capacity(&self) -> bool { + fn has_gpu_capacity() -> bool { std::path::Path::new("/dev/nvidia0").exists() } /// Validate a sandbox before creation. 
- pub async fn validate_sandbox_create( + pub fn validate_sandbox_create( &self, sandbox: &DriverSandbox, ) -> Result<(), ComputeDriverError> { let gpu_requested = sandbox.spec.as_ref().is_some_and(|s| s.gpu); - if gpu_requested && !self.has_gpu_capacity() { + if gpu_requested && !Self::has_gpu_capacity() { return Err(ComputeDriverError::Precondition( "GPU sandbox requested, but no NVIDIA GPU devices are available.".to_string(), )); @@ -664,7 +664,7 @@ mod tests { .len(); let socket_path_for_task = socket_path.clone(); let log_for_task = request_log.clone(); - let queue_for_task = response_queue.clone(); + let queue_for_task = response_queue; let handle = tokio::spawn(async move { for _ in 0..expected { let (stream, _) = listener.accept().await.expect("test stub should accept"); @@ -677,11 +677,10 @@ mod tests { let log = log.clone(); let queue = queue.clone(); async move { - let path = req - .uri() - .path_and_query() - .map(|pq| pq.as_str().to_string()) - .unwrap_or_else(|| req.uri().path().to_string()); + let path = req.uri().path_and_query().map_or_else( + || req.uri().path().to_string(), + |pq| pq.as_str().to_string(), + ); log.lock() .expect("request log lock should not be poisoned") .push(format!("{} {}", req.method(), path)); diff --git a/crates/openshell-driver-podman/src/grpc.rs b/crates/openshell-driver-podman/src/grpc.rs index d11f95b78..2b413b5e4 100644 --- a/crates/openshell-driver-podman/src/grpc.rs +++ b/crates/openshell-driver-podman/src/grpc.rs @@ -35,7 +35,6 @@ impl ComputeDriver for ComputeDriverService { ) -> Result, Status> { self.driver .capabilities() - .await .map(Response::new) .map_err(status_from_driver_error) } @@ -50,7 +49,6 @@ impl ComputeDriver for ComputeDriverService { .ok_or_else(|| Status::invalid_argument("sandbox is required"))?; self.driver .validate_sandbox_create(&sandbox) - .await .map_err(status_from_driver_error)?; Ok(Response::new(ValidateSandboxCreateResponse {})) } @@ -247,7 +245,7 @@ mod tests { .len(); let 
socket_path_for_task = socket_path.clone(); let log_for_task = request_log.clone(); - let queue_for_task = response_queue.clone(); + let queue_for_task = response_queue; let handle = tokio::spawn(async move { for _ in 0..expected { let (stream, _) = listener.accept().await.expect("test stub should accept"); @@ -260,11 +258,10 @@ mod tests { let log = log.clone(); let queue = queue.clone(); async move { - let path = req - .uri() - .path_and_query() - .map(|pq| pq.as_str().to_string()) - .unwrap_or_else(|| req.uri().path().to_string()); + let path = req.uri().path_and_query().map_or_else( + || req.uri().path().to_string(), + |pq| pq.as_str().to_string(), + ); log.lock() .expect("request log lock should not be poisoned") .push(format!("{} {}", req.method(), path)); diff --git a/crates/openshell-driver-podman/src/watcher.rs b/crates/openshell-driver-podman/src/watcher.rs index 69cab6d88..54606ea44 100644 --- a/crates/openshell-driver-podman/src/watcher.rs +++ b/crates/openshell-driver-podman/src/watcher.rs @@ -103,12 +103,12 @@ pub async fn start_watch(client: PodmanClient) -> Result Result { - if let Some(we) = map_podman_event(&event, &client).await { - if tx.send(Ok(we)).await.is_err() { - return; - } + if let Some(we) = map_podman_event(&event, &client).await + && tx.send(Ok(we)).await.is_err() + { + return; } } Err(e) => { @@ -292,7 +292,7 @@ pub fn driver_sandbox_from_inspect(inspect: &ContainerInspect) -> Option Option { +pub fn driver_sandbox_from_list_entry(entry: &ContainerListEntry) -> Option { let sandbox_id = entry.labels.get(LABEL_SANDBOX_ID)?.clone(); let sandbox_name = entry .labels @@ -508,9 +508,8 @@ mod tests { }; let payload = event.payload.unwrap(); - let sandbox_event = match payload { - watch_sandboxes_event::Payload::Sandbox(s) => s, - _ => panic!("expected Sandbox payload"), + let watch_sandboxes_event::Payload::Sandbox(sandbox_event) = payload else { + panic!("expected Sandbox payload") }; let status = 
sandbox_event.sandbox.unwrap().status.unwrap(); assert_eq!(status.conditions.len(), 1); diff --git a/crates/openshell-driver-vm/build.rs b/crates/openshell-driver-vm/build.rs index 981cf8ff8..6b9800ef6 100644 --- a/crates/openshell-driver-vm/build.rs +++ b/crates/openshell-driver-vm/build.rs @@ -7,7 +7,7 @@ //! artifacts it needs to boot base VMs without depending on the openshell-vm //! binary or crate. -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::{env, fs}; fn main() { @@ -145,7 +145,7 @@ fn main() { } } -fn generate_stub_resources(out_dir: &std::path::Path, names: &[&str]) { +fn generate_stub_resources(out_dir: &Path, names: &[&str]) { for name in names { let path = out_dir.join(name); if !path.exists() { diff --git a/crates/openshell-driver-vm/src/driver.rs b/crates/openshell-driver-vm/src/driver.rs index e5520b5a1..a0c69cef6 100644 --- a/crates/openshell-driver-vm/src/driver.rs +++ b/crates/openshell-driver-vm/src/driver.rs @@ -270,7 +270,10 @@ impl VmDriver { } } - pub async fn validate_sandbox(&self, sandbox: &Sandbox) -> Result<(), Status> { + // `tonic::Status` is large but is the standard error type across the + // gRPC API surface; boxing here would diverge from every other handler. 
+ #[allow(clippy::result_large_err)] + pub fn validate_sandbox(&self, sandbox: &Sandbox) -> Result<(), Status> { validate_vm_sandbox(sandbox, self.config.gpu_enabled) } @@ -547,14 +550,14 @@ impl VmDriver { sandbox_name: &str, ) -> Result, Status> { let registry = self.registry.lock().await; - let sandbox = if !sandbox_id.is_empty() { + let sandbox = if sandbox_id.is_empty() { registry - .get(sandbox_id) + .values() + .find(|record| record.snapshot.name == sandbox_name) .map(|record| record.snapshot.clone()) } else { registry - .values() - .find(|record| record.snapshot.name == sandbox_name) + .get(sandbox_id) .map(|record| record.snapshot.clone()) }; Ok(sandbox) @@ -633,10 +636,10 @@ impl VmDriver { }; if let Some(status) = exit_status { - let message = match status.code() { - Some(code) => format!("VM process exited with status {code}"), - None => "VM process exited".to_string(), - }; + let message = status.code().map_or_else( + || "VM process exited".to_string(), + |code| format!("VM process exited with status {code}"), + ); if let Some(snapshot) = self .set_snapshot_condition( &sandbox_id, @@ -727,7 +730,7 @@ impl ComputeDriver for VmDriver { .into_inner() .sandbox .ok_or_else(|| Status::invalid_argument("sandbox is required"))?; - self.validate_sandbox(&sandbox).await?; + self.validate_sandbox(&sandbox)?; Ok(Response::new(ValidateSandboxCreateResponse {})) } @@ -842,7 +845,7 @@ impl ComputeDriver for VmDriver { return; } } - Err(broadcast::error::RecvError::Lagged(_)) => continue, + Err(broadcast::error::RecvError::Lagged(_)) => {} Err(broadcast::error::RecvError::Closed) => return, } } @@ -853,7 +856,7 @@ impl ComputeDriver for VmDriver { } #[cfg(target_os = "linux")] -#[allow(unsafe_code)] +#[allow(unsafe_code)] // libc::geteuid is a thin syscall wrapper fn check_gpu_privileges() -> Result<(), String> { if unsafe { libc::geteuid() } != 0 { return Err( @@ -865,6 +868,9 @@ fn check_gpu_privileges() -> Result<(), String> { Ok(()) } +// `tonic::Status` is ~176 
bytes; it's the standard error type across the +// gRPC API surface, so boxing here would diverge from every other handler. +#[allow(clippy::result_large_err)] fn validate_vm_sandbox(sandbox: &Sandbox, gpu_enabled: bool) -> Result<(), Status> { let spec = sandbox .spec @@ -1052,7 +1058,7 @@ async fn copy_guest_tls_material( async fn terminate_vm_process(child: &mut Child) -> Result<(), std::io::Error> { if let Some(pid) = child.id() - && let Err(err) = kill(Pid::from_raw(pid as i32), Signal::SIGTERM) + && let Err(err) = kill(Pid::from_raw(pid.cast_signed()), Signal::SIGTERM) && err != Errno::ESRCH { return Err(std::io::Error::other(format!( @@ -1146,7 +1152,9 @@ fn platform_event(source: &str, event_type: &str, reason: &str, message: String) fn current_time_ms() -> i64 { std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) - .map_or(0, |duration| duration.as_millis() as i64) + .map_or(0, |duration| { + i64::try_from(duration.as_millis()).unwrap_or(i64::MAX) + }) } #[cfg(test)] @@ -1210,13 +1218,12 @@ mod tests { spec: Some(SandboxSpec { template: Some(SandboxTemplate { platform_config: Some(Struct { - fields: [( + fields: std::iter::once(( "runtime_class_name".to_string(), Value { kind: Some(Kind::StringValue("kata".to_string())), }, - )] - .into_iter() + )) .collect(), }), ..Default::default() diff --git a/crates/openshell-driver-vm/src/embedded_runtime.rs b/crates/openshell-driver-vm/src/embedded_runtime.rs index 63f83b874..a59c80b45 100644 --- a/crates/openshell-driver-vm/src/embedded_runtime.rs +++ b/crates/openshell-driver-vm/src/embedded_runtime.rs @@ -134,10 +134,11 @@ fn runtime_cache_key() -> String { let sample = &chunk[..chunk.len().min(64)]; let mut word: u64 = 0; for (offset, byte) in sample.iter().enumerate() { - word ^= (*byte as u64) << ((offset % 8) * 8); + word ^= u64::from(*byte) << ((offset % 8) * 8); } - fp ^= word.rotate_left((index as u32) * 13 + 7); - fp ^= (chunk.len() as u64).rotate_left((index as u32) * 17 + 3); + let 
index_u32 = u32::try_from(index).unwrap_or(u32::MAX); + fp ^= word.rotate_left(index_u32 * 13 + 7); + fp ^= (chunk.len() as u64).rotate_left(index_u32 * 17 + 3); } format!("{VERSION}-{fp:016x}") } diff --git a/crates/openshell-driver-vm/src/ffi.rs b/crates/openshell-driver-vm/src/ffi.rs index a81b150af..db5d3ec10 100644 --- a/crates/openshell-driver-vm/src/ffi.rs +++ b/crates/openshell-driver-vm/src/ffi.rs @@ -59,6 +59,9 @@ type KrunAddNetUnixstream = unsafe extern "C" fn( flags: u32, ) -> i32; +// Field names mirror the libkrun C API symbol names (`krun_*`); preserving +// the prefix keeps the FFI binding 1:1 with the upstream library. +#[allow(clippy::struct_field_names)] pub struct LibKrun { pub krun_init_log: KrunInitLog, pub krun_create_ctx: KrunCreateCtx, @@ -155,7 +158,10 @@ fn preload_runtime_support_libraries(runtime_dir: &Path) -> Result, .is_some_and(|name| { #[cfg(target_os = "macos")] { - name.starts_with("libkrunfw") && name.ends_with(".dylib") + name.starts_with("libkrunfw") + && Path::new(name) + .extension() + .is_some_and(|ext| ext.eq_ignore_ascii_case("dylib")) } #[cfg(not(target_os = "macos"))] { diff --git a/crates/openshell-driver-vm/src/procguard.rs b/crates/openshell-driver-vm/src/procguard.rs index 1d91880f7..fd4d3c872 100644 --- a/crates/openshell-driver-vm/src/procguard.rs +++ b/crates/openshell-driver-vm/src/procguard.rs @@ -43,11 +43,13 @@ pub fn die_with_parent() -> Result<(), String> { die_with_parent_cleanup(|| ()) } -/// Like [`die_with_parent`], but run `cleanup` (best-effort, -/// async-signal-unsafe — it runs on the helper thread) immediately -/// before terminating the process. Use this when we own children that -/// cannot arm their own procguard; the cleanup hook is the only chance -/// we get to send them SIGTERM after the kernel reparents us. +/// Like [`die_with_parent`], but run `cleanup` before terminating. 
+/// +/// The cleanup hook is best-effort and async-signal-unsafe — it runs on +/// the helper thread immediately before terminating the process. Use this +/// when we own children that cannot arm their own procguard; the cleanup +/// hook is the only chance we get to send them SIGTERM after the kernel +/// reparents us. /// /// On Linux the cleanup is a no-op: `PR_SET_PDEATHSIG` delivers SIGKILL /// directly to us, there is no Rust-controlled moment between "parent diff --git a/crates/openshell-driver-vm/src/runtime.rs b/crates/openshell-driver-vm/src/runtime.rs index 62f2e314c..09576bee0 100644 --- a/crates/openshell-driver-vm/src/runtime.rs +++ b/crates/openshell-driver-vm/src/runtime.rs @@ -30,6 +30,20 @@ pub enum VmBackend { Qemu, } +// virtio-net feature bits (see Linux `include/uapi/linux/virtio_net.h`). +const NET_FEATURE_CSUM: u32 = 1 << 0; +const NET_FEATURE_GUEST_CSUM: u32 = 1 << 1; +const NET_FEATURE_GUEST_TSO4: u32 = 1 << 7; +const NET_FEATURE_GUEST_UFO: u32 = 1 << 10; +const NET_FEATURE_HOST_TSO4: u32 = 1 << 11; +const NET_FEATURE_HOST_UFO: u32 = 1 << 14; +const COMPAT_NET_FEATURES: u32 = NET_FEATURE_CSUM + | NET_FEATURE_GUEST_CSUM + | NET_FEATURE_GUEST_TSO4 + | NET_FEATURE_GUEST_UFO + | NET_FEATURE_HOST_TSO4 + | NET_FEATURE_HOST_UFO; + pub struct VmLaunchConfig { pub rootfs: PathBuf, pub vcpus: u8, @@ -778,7 +792,7 @@ fn run_libkrun_vm(config: &VmLaunchConfig) -> Result<(), String> { // The procguard cleanup reads GVPROXY_PID atomically. Storing it // here makes the callback able to SIGTERM gvproxy if the driver // dies from this moment onward. 
- GVPROXY_PID.store(child.id() as i32, Ordering::Relaxed); + GVPROXY_PID.store(child.id().cast_signed(), Ordering::Relaxed); wait_for_path(&net_sock, Duration::from_secs(5), "gvproxy data socket")?; @@ -786,18 +800,6 @@ fn run_libkrun_vm(config: &VmLaunchConfig) -> Result<(), String> { vm.add_vsock(0)?; let mac: [u8; 6] = [0x5a, 0x94, 0xef, 0xe4, 0x0c, 0xee]; - const NET_FEATURE_CSUM: u32 = 1 << 0; - const NET_FEATURE_GUEST_CSUM: u32 = 1 << 1; - const NET_FEATURE_GUEST_TSO4: u32 = 1 << 7; - const NET_FEATURE_GUEST_UFO: u32 = 1 << 10; - const NET_FEATURE_HOST_TSO4: u32 = 1 << 11; - const NET_FEATURE_HOST_UFO: u32 = 1 << 14; - const COMPAT_NET_FEATURES: u32 = NET_FEATURE_CSUM - | NET_FEATURE_GUEST_CSUM - | NET_FEATURE_GUEST_TSO4 - | NET_FEATURE_GUEST_UFO - | NET_FEATURE_HOST_TSO4 - | NET_FEATURE_HOST_UFO; #[cfg(target_os = "linux")] vm.add_net_unixstream(&net_sock, &mac, COMPAT_NET_FEATURES)?; @@ -980,7 +982,7 @@ impl VmContext { Ok(Self { krun, - ctx_id: ctx_id as u32, + ctx_id: ctx_id.cast_unsigned(), }) } @@ -1078,10 +1080,10 @@ impl VmContext { fn set_exec(&self, exec_path: &str, args: &[String], env: &[String]) -> Result<(), String> { let exec_c = CString::new(exec_path).map_err(|e| format!("invalid exec path: {e}"))?; - let argv_strs: Vec<&str> = args.iter().map(String::as_str).collect(); - let (_argv_owners, argv_ptrs) = c_string_array(&argv_strs)?; - let env_strs: Vec<&str> = env.iter().map(String::as_str).collect(); - let (_env_owners, env_ptrs) = c_string_array(&env_strs)?; + let argv_slices: Vec<&str> = args.iter().map(String::as_str).collect(); + let (_argv_owners, argv_ptrs) = c_string_array(&argv_slices)?; + let env_slices: Vec<&str> = env.iter().map(String::as_str).collect(); + let (_env_owners, env_ptrs) = c_string_array(&env_slices)?; check( unsafe { @@ -1154,24 +1156,26 @@ fn wait_for_path(path: &Path, timeout: Duration, label: &str) -> Result<(), Stri } fn hash_path_id(path: &Path) -> String { - let mut hash: u64 = 0xcbf29ce484222325; + let mut 
hash: u64 = 0xcbf2_9ce4_8422_2325; for byte in path.to_string_lossy().as_bytes() { hash ^= u64::from(*byte); - hash = hash.wrapping_mul(0x100000001b3); + hash = hash.wrapping_mul(0x0100_0000_01b3); } format!("{:012x}", hash & 0x0000_ffff_ffff_ffff) } fn secure_socket_base(subdir: &str) -> Result { - let base = if let Some(xdg) = std::env::var_os("XDG_RUNTIME_DIR") { - PathBuf::from(xdg) - } else { - let mut base = PathBuf::from("/tmp"); - if !base.is_dir() { - base = std::env::temp_dir(); - } - base - }; + let base = std::env::var_os("XDG_RUNTIME_DIR").map_or_else( + || { + let fallback = PathBuf::from("/tmp"); + if fallback.is_dir() { + fallback + } else { + std::env::temp_dir() + } + }, + PathBuf::from, + ); let dir = base.join(subdir); if dir.exists() { @@ -1298,7 +1302,7 @@ fn path_to_cstring(path: &Path) -> Result { let path = path .to_str() .ok_or_else(|| format!("path is not valid UTF-8: {}", path.display()))?; - CString::new(path).map_err(|e| format!("invalid path string {}: {e}", path)) + CString::new(path).map_err(|e| format!("invalid path string {path}: {e}")) } #[cfg(target_os = "linux")] diff --git a/crates/openshell-ocsf/src/format/shorthand.rs b/crates/openshell-ocsf/src/format/shorthand.rs index 7e2296de9..42b30fbae 100644 --- a/crates/openshell-ocsf/src/format/shorthand.rs +++ b/crates/openshell-ocsf/src/format/shorthand.rs @@ -50,7 +50,6 @@ pub fn severity_char(severity_id: u8) -> char { #[must_use] pub fn severity_tag(severity_id: u8) -> &'static str { match severity_id { - 1 => "[INFO]", 2 => "[LOW]", 3 => "[MED]", 4 => "[HIGH]", diff --git a/crates/openshell-ocsf/src/tracing_layers/jsonl_layer.rs b/crates/openshell-ocsf/src/tracing_layers/jsonl_layer.rs index 1f7022ef8..920483700 100644 --- a/crates/openshell-ocsf/src/tracing_layers/jsonl_layer.rs +++ b/crates/openshell-ocsf/src/tracing_layers/jsonl_layer.rs @@ -60,10 +60,10 @@ where } // If an enabled flag is set and it reads `false`, skip writing. 
- if let Some(ref flag) = self.enabled { - if !flag.load(Ordering::Relaxed) { - return; - } + if let Some(ref flag) = self.enabled + && !flag.load(Ordering::Relaxed) + { + return; } if let Some(ocsf_event) = clone_current_event() diff --git a/crates/openshell-ocsf/src/tracing_layers/shorthand_layer.rs b/crates/openshell-ocsf/src/tracing_layers/shorthand_layer.rs index ea75cf0dc..dee9848ad 100644 --- a/crates/openshell-ocsf/src/tracing_layers/shorthand_layer.rs +++ b/crates/openshell-ocsf/src/tracing_layers/shorthand_layer.rs @@ -94,6 +94,20 @@ impl tracing::field::Visit for MessageVisitor<'_> { } } +/// Test helper: wraps `Arc>>` so it implements `Write + Send`. +#[cfg(test)] +struct SyncWriter(std::sync::Arc>>); + +#[cfg(test)] +impl Write for SyncWriter { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.0.lock().unwrap().write(buf) + } + fn flush(&mut self) -> std::io::Result<()> { + self.0.lock().unwrap().flush() + } +} + #[cfg(test)] mod tests { use super::*; @@ -139,17 +153,3 @@ mod tests { ); } } - -/// Test helper: wraps `Arc>>` so it implements `Write + Send`. -#[cfg(test)] -struct SyncWriter(std::sync::Arc>>); - -#[cfg(test)] -impl Write for SyncWriter { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - self.0.lock().unwrap().write(buf) - } - fn flush(&mut self) -> std::io::Result<()> { - self.0.lock().unwrap().flush() - } -} diff --git a/crates/openshell-policy/src/lib.rs b/crates/openshell-policy/src/lib.rs index 0296879af..f1abda06b 100644 --- a/crates/openshell-policy/src/lib.rs +++ b/crates/openshell-policy/src/lib.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! Shared sandbox policy parsing and defaults for OpenShell. +//! Shared sandbox policy parsing and defaults for `OpenShell`. //! //! Provides bidirectional YAML↔proto conversion for sandbox policies. //! 
@@ -117,6 +117,8 @@ struct NetworkEndpointDef { allow_encoded_slash: bool, } +// Signature dictated by serde's `skip_serializing_if`, which requires `&T`. +#[allow(clippy::trivially_copy_pass_by_ref)] fn is_zero(v: &u16) -> bool { *v == 0 } @@ -369,12 +371,12 @@ fn from_proto(policy: &SandboxPolicy) -> PolicyFile { .query .into_iter() .map(|(key, matcher)| { - let yaml_matcher = if !matcher.any.is_empty() { + let yaml_matcher = if matcher.any.is_empty() { + QueryMatcherDef::Glob(matcher.glob) + } else { QueryMatcherDef::Any(QueryAnyDef { any: matcher.any, }) - } else { - QueryMatcherDef::Glob(matcher.glob) }; (key, yaml_matcher) }) @@ -395,12 +397,12 @@ fn from_proto(policy: &SandboxPolicy) -> PolicyFile { .query .iter() .map(|(key, matcher)| { - let yaml_matcher = if !matcher.any.is_empty() { + let yaml_matcher = if matcher.any.is_empty() { + QueryMatcherDef::Glob(matcher.glob.clone()) + } else { QueryMatcherDef::Any(QueryAnyDef { any: matcher.any.clone(), }) - } else { - QueryMatcherDef::Glob(matcher.glob.clone()) }; (key.clone(), yaml_matcher) }) @@ -535,9 +537,7 @@ pub fn restrictive_default_policy() -> SandboxPolicy { /// the required `"sandbox"` value. Call this before validation so that /// policies without an explicit process section get the correct default. pub fn ensure_sandbox_process_identity(policy: &mut SandboxPolicy) { - let process = policy - .process - .get_or_insert_with(|| ProcessPolicy::default()); + let process = policy.process.get_or_insert_with(ProcessPolicy::default); if process.run_as_user.is_empty() { process.run_as_user = "sandbox".into(); } @@ -812,7 +812,7 @@ network_policies: /// Verify that the network policy `name` field survives the round-trip. 
#[test] fn round_trip_preserves_policy_name() { - let yaml = r#" + let yaml = r" version: 1 network_policies: my_api: @@ -822,7 +822,7 @@ network_policies: port: 443 binaries: - path: /usr/bin/curl -"#; +"; let proto1 = parse_sandbox_policy(yaml).expect("parse failed"); assert_eq!(proto1.network_policies["my_api"].name, "my-custom-api-name"); @@ -891,7 +891,7 @@ network_policies: #[test] fn parse_policy_with_network_rules() { - let yaml = r#" + let yaml = r" version: 1 network_policies: test: @@ -900,7 +900,7 @@ network_policies: - { host: example.com, port: 443 } binaries: - { path: /usr/bin/curl } -"#; +"; let policy = parse_sandbox_policy(yaml).expect("should parse"); assert_eq!(policy.network_policies.len(), 1); let rule = &policy.network_policies["test"]; @@ -1277,7 +1277,7 @@ network_policies: #[test] fn parse_ports_array() { - let yaml = r#" + let yaml = r" version: 1 network_policies: test: @@ -1286,7 +1286,7 @@ network_policies: - { host: api.example.com, ports: [80, 443] } binaries: - { path: /usr/bin/curl } -"#; +"; let policy = parse_sandbox_policy(yaml).expect("should parse"); let ep = &policy.network_policies["test"].endpoints[0]; assert_eq!(ep.ports, vec![80, 443]); @@ -1296,7 +1296,7 @@ network_policies: #[test] fn parse_single_port_normalized_to_ports() { - let yaml = r#" + let yaml = r" version: 1 network_policies: test: @@ -1305,7 +1305,7 @@ network_policies: - { host: api.example.com, port: 443 } binaries: - { path: /usr/bin/curl } -"#; +"; let policy = parse_sandbox_policy(yaml).expect("should parse"); let ep = &policy.network_policies["test"].endpoints[0]; assert_eq!(ep.ports, vec![443]); @@ -1314,7 +1314,7 @@ network_policies: #[test] fn round_trip_preserves_multi_port() { - let yaml = r#" + let yaml = r" version: 1 network_policies: test: @@ -1326,7 +1326,7 @@ network_policies: - 443 binaries: - { path: /usr/bin/curl } -"#; +"; let proto1 = parse_sandbox_policy(yaml).expect("parse failed"); let yaml_out = 
serialize_sandbox_policy(&proto1).expect("serialize failed"); let proto2 = parse_sandbox_policy(&yaml_out).expect("re-parse failed"); @@ -1339,7 +1339,7 @@ network_policies: #[test] fn serialize_single_port_uses_compact_form() { - let yaml = r#" + let yaml = r" version: 1 network_policies: test: @@ -1348,7 +1348,7 @@ network_policies: - { host: api.example.com, port: 443 } binaries: - { path: /usr/bin/curl } -"#; +"; let proto = parse_sandbox_policy(yaml).expect("parse failed"); let yaml_out = serialize_sandbox_policy(&proto).expect("serialize failed"); // Should use compact `port: 443` form, not `ports: [443]` @@ -1493,7 +1493,7 @@ network_policies: #[test] fn parse_rejects_unknown_fields_in_deny_rule() { - let yaml = r#" + let yaml = r" version: 1 network_policies: test: @@ -1504,20 +1504,20 @@ network_policies: - method: POST path: /foo bogus: true -"#; +"; assert!(parse_sandbox_policy(yaml).is_err()); } #[test] fn rejects_port_above_65535() { - let yaml = r#" + let yaml = r" version: 1 network_policies: test: endpoints: - host: example.com port: 70000 -"#; +"; assert!( parse_sandbox_policy(yaml).is_err(), "port >65535 should fail to parse" diff --git a/crates/openshell-policy/src/merge.rs b/crates/openshell-policy/src/merge.rs index 5f5d2d40d..0d008b90f 100644 --- a/crates/openshell-policy/src/merge.rs +++ b/crates/openshell-policy/src/merge.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use openshell_core::proto::{ L7Allow, L7DenyRule, L7Rule, NetworkBinary, NetworkEndpoint, NetworkPolicyRule, SandboxPolicy, @@ -387,7 +387,7 @@ fn merge_endpoint( .unwrap_or(0); if existing.host.is_empty() { - existing.host = incoming.host.clone(); + existing.host.clone_from(&incoming.host); } merge_endpoint_ports(existing, incoming); @@ -446,7 +446,7 @@ fn merge_endpoint( incoming: incoming.access.clone(), }); } else if existing.access.is_empty() { - existing.access = incoming.access.clone(); + existing.access.clone_from(&incoming.access); } else if existing.access != incoming.access { warnings.push(PolicyMergeWarning::ExistingAccessRetained { host, @@ -488,8 +488,8 @@ fn merge_endpoint_ports(existing: &mut NetworkEndpoint, incoming: &NetworkEndpoi } ports.sort_unstable(); ports.dedup(); - existing.ports = ports.clone(); existing.port = ports.first().copied().unwrap_or(0); + existing.ports = ports; } fn rules_share_endpoint( @@ -626,7 +626,7 @@ fn expand_access_preset(access: &str) -> Option> { method: method.to_string(), path: "**".to_string(), command: String::new(), - query: Default::default(), + query: HashMap::default(), }), }) .collect(), @@ -678,8 +678,8 @@ fn normalize_endpoint(endpoint: &mut NetworkEndpoint) { let mut ports = canonical_ports(endpoint); ports.sort_unstable(); ports.dedup(); - endpoint.ports = ports.clone(); endpoint.port = ports.first().copied().unwrap_or(0); + endpoint.ports = ports; dedup_strings(&mut endpoint.allowed_ips); dedup_l7_rules(&mut endpoint.rules); dedup_deny_rules(&mut endpoint.deny_rules); @@ -745,8 +745,8 @@ fn remove_endpoint(policy: &mut SandboxPolicy, rule_name: Option<&str>, host: &s return false; } - endpoint.ports = remaining_ports.clone(); endpoint.port = remaining_ports[0]; + endpoint.ports = remaining_ports; true }); diff --git a/crates/openshell-prover/src/lib.rs b/crates/openshell-prover/src/lib.rs 
index feff7d1d5..82922253d 100644 --- a/crates/openshell-prover/src/lib.rs +++ b/crates/openshell-prover/src/lib.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! Formal policy verification for OpenShell sandboxes. +//! Formal policy verification for `OpenShell` sandboxes. //! //! Encodes sandbox policies, binary capabilities, and credential scopes as Z3 //! SMT constraints, then checks reachability queries to detect data exfiltration @@ -91,7 +91,7 @@ mod tests { #[test] fn test_parse_policy() { let path = testdata_dir().join("policy.yaml"); - let model = policy::parse_policy(&path).expect("failed to parse policy"); + let model = parse_policy(&path).expect("failed to parse policy"); assert_eq!(model.version, 1); assert!(model.network_policies.contains_key("github_api")); let rule = &model.network_policies["github_api"]; @@ -104,7 +104,7 @@ mod tests { #[test] fn test_filesystem_policy() { let path = testdata_dir().join("policy.yaml"); - let model = policy::parse_policy(&path).expect("failed to parse policy"); + let model = parse_policy(&path).expect("failed to parse policy"); let readable = model.filesystem_policy.readable_paths(); assert!(readable.contains(&"/usr".to_owned())); assert!(readable.contains(&"/sandbox".to_owned())); @@ -114,12 +114,12 @@ mod tests { // 3. Workdir NOT included by default (matches runtime behavior). #[test] fn test_include_workdir_default() { - let yaml = r#" + let yaml = r" version: 1 filesystem_policy: read_only: - /usr -"#; +"; let model = policy::parse_policy_str(yaml).expect("parse"); let readable = model.filesystem_policy.readable_paths(); assert!(!readable.contains(&"/sandbox".to_owned())); @@ -128,13 +128,13 @@ filesystem_policy: // 4. Workdir excluded when include_workdir: false. 
#[test] fn test_include_workdir_false() { - let yaml = r#" + let yaml = r" version: 1 filesystem_policy: include_workdir: false read_only: - /usr -"#; +"; let model = policy::parse_policy_str(yaml).expect("parse"); let readable = model.filesystem_policy.readable_paths(); assert!(!readable.contains(&"/sandbox".to_owned())); @@ -143,14 +143,14 @@ filesystem_policy: // 5. No duplicate when workdir already in read_write. #[test] fn test_include_workdir_no_duplicate() { - let yaml = r#" + let yaml = r" version: 1 filesystem_policy: include_workdir: true read_write: - /sandbox - /tmp -"#; +"; let model = policy::parse_policy_str(yaml).expect("parse"); let readable = model.filesystem_policy.readable_paths(); let sandbox_count = readable.iter().filter(|p| *p == "/sandbox").count(); @@ -163,12 +163,12 @@ filesystem_policy: let policy_path = testdata_dir().join("policy.yaml"); let creds_path = testdata_dir().join("credentials.yaml"); - let pol = policy::parse_policy(&policy_path).expect("parse policy"); + let pol = parse_policy(&policy_path).expect("parse policy"); let cred_set = credentials::load_credential_set_embedded(&creds_path).expect("load creds"); let bin_reg = registry::load_embedded_binary_registry().expect("load registry"); - let z3_model = model::build_model(pol, cred_set, bin_reg); - let findings = queries::run_all_queries(&z3_model); + let z3_model = build_model(pol, cred_set, bin_reg); + let findings = run_all_queries(&z3_model); let query_types: std::collections::HashSet<&str> = findings.iter().map(|f| f.query.as_str()).collect(); @@ -195,12 +195,12 @@ filesystem_policy: let policy_path = testdata_dir().join("empty-policy.yaml"); let creds_path = testdata_dir().join("credentials.yaml"); - let pol = policy::parse_policy(&policy_path).expect("parse policy"); + let pol = parse_policy(&policy_path).expect("parse policy"); let cred_set = credentials::load_credential_set_embedded(&creds_path).expect("load creds"); let bin_reg = 
registry::load_embedded_binary_registry().expect("load registry"); - let z3_model = model::build_model(pol, cred_set, bin_reg); - let findings = queries::run_all_queries(&z3_model); + let z3_model = build_model(pol, cred_set, bin_reg); + let findings = run_all_queries(&z3_model); assert!( findings.is_empty(), diff --git a/crates/openshell-prover/src/model.rs b/crates/openshell-prover/src/model.rs index b96e7fef2..bf52993d4 100644 --- a/crates/openshell-prover/src/model.rs +++ b/crates/openshell-prover/src/model.rs @@ -27,7 +27,7 @@ impl EndpointId { } } -/// Z3-backed reachability model for an OpenShell sandbox policy. +/// Z3-backed reachability model for an `OpenShell` sandbox policy. pub struct ReachabilityModel { pub policy: PolicyModel, pub credentials: CredentialSet, @@ -169,18 +169,18 @@ impl ReachabilityModel { allowed.iter().any(|m| write_set.contains(m.as_str())) }; - let l7w_var = Bool::new_const(format!("l7_allows_write_{ek}")); + let l7_write_var = Bool::new_const(format!("l7_allows_write_{ek}")); if ep.is_l7_enforced() { if has_write { - self.solver.assert(&l7w_var); + self.solver.assert(&l7_write_var); } else { - self.solver.assert(&!l7w_var.clone()); + self.solver.assert(&!l7_write_var.clone()); } } else { // L4-only: all methods pass through - self.solver.assert(&l7w_var); + self.solver.assert(&l7_write_var); } - self.l7_allows_write.insert(ek, l7w_var); + self.l7_allows_write.insert(ek, l7_write_var); } } } @@ -256,13 +256,14 @@ impl ReachabilityModel { } self.credential_has_write.insert(host.clone(), cw_var); - let cd_var = Bool::new_const(format!("credential_has_destructive_{host}")); + let destructive_var = Bool::new_const(format!("credential_has_destructive_{host}")); if has_destructive { - self.solver.assert(&cd_var); + self.solver.assert(&destructive_var); } else { - self.solver.assert(&!cd_var.clone()); + self.solver.assert(&!destructive_var.clone()); } - self.credential_has_destructive.insert(host.clone(), cd_var); + 
self.credential_has_destructive + .insert(host.clone(), destructive_var); } } @@ -364,7 +365,7 @@ impl ReachabilityModel { has_access, exfil, Bool::or(&[ - Bool::and(&[!l7_enforced.clone(), http.clone()]), + Bool::and(&[!l7_enforced, http.clone()]), Bool::and(&[l7_write, http]), bypass, ]), diff --git a/crates/openshell-prover/src/policy.rs b/crates/openshell-prover/src/policy.rs index b116fd5b0..8aea4b7d0 100644 --- a/crates/openshell-prover/src/policy.rs +++ b/crates/openshell-prover/src/policy.rs @@ -249,23 +249,13 @@ pub struct NetworkPolicyRule { } /// Filesystem access policy. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct FilesystemPolicy { pub include_workdir: bool, pub read_only: Vec, pub read_write: Vec, } -impl Default for FilesystemPolicy { - fn default() -> Self { - Self { - include_workdir: false, - read_only: Vec::new(), - read_write: Vec::new(), - } - } -} - impl FilesystemPolicy { /// All readable paths (union of `read_only` and `read_write`), with workdir /// added when `include_workdir` is true and not already present. @@ -302,7 +292,7 @@ impl Default for PolicyModel { } impl PolicyModel { - /// All (policy_name, endpoint) pairs. + /// All (`policy_name`, endpoint) pairs. pub fn all_endpoints(&self) -> Vec<(&str, &Endpoint)> { let mut result = Vec::new(); for (name, rule) in &self.network_policies { @@ -327,7 +317,7 @@ impl PolicyModel { result } - /// All (binary, policy_name, endpoint) triples. + /// All (binary, `policy_name`, endpoint) triples. pub fn binary_endpoint_pairs(&self) -> Vec<(&Binary, &str, &Endpoint)> { let mut result = Vec::new(); for (name, rule) in &self.network_policies { @@ -345,7 +335,7 @@ impl PolicyModel { // Parsing // --------------------------------------------------------------------------- -/// Parse an OpenShell policy YAML file into a [`PolicyModel`]. +/// Parse an `OpenShell` policy YAML file into a [`PolicyModel`]. 
pub fn parse_policy(path: &Path) -> Result { let contents = std::fs::read_to_string(path) .into_diagnostic() diff --git a/crates/openshell-prover/src/queries.rs b/crates/openshell-prover/src/queries.rs index 001255cea..6a0c7f6a6 100644 --- a/crates/openshell-prover/src/queries.rs +++ b/crates/openshell-prover/src/queries.rs @@ -44,8 +44,7 @@ pub fn check_data_exfiltration(model: &ReachabilityModel) -> Vec { ( "l4_only".to_owned(), format!( - "L4-only endpoint — no HTTP inspection, {} can send arbitrary data", - bpath + "L4-only endpoint — no HTTP inspection, {bpath} can send arbitrary data" ), ) } else { diff --git a/crates/openshell-prover/src/registry.rs b/crates/openshell-prover/src/registry.rs index 3acd52f82..63a3fce3b 100644 --- a/crates/openshell-prover/src/registry.rs +++ b/crates/openshell-prover/src/registry.rs @@ -129,7 +129,7 @@ impl BinaryCapability { /// Whether the binary can perform write actions. pub fn can_write(&self) -> bool { - self.protocols.iter().any(|p| p.can_write()) || self.can_construct_http + self.protocols.iter().any(BinaryProtocol::can_write) || self.can_construct_http } /// Short mechanisms by which this binary can write. @@ -170,12 +170,11 @@ impl BinaryRegistry { return cap.clone(); } for (reg_path, cap) in &self.binaries { - if reg_path.contains('*') { - if let Ok(pattern) = glob::Pattern::new(reg_path) { - if pattern.matches(path) { - return cap.clone(); - } - } + if reg_path.contains('*') + && let Ok(pattern) = glob::Pattern::new(reg_path) + && pattern.matches(path) + { + return cap.clone(); } } BinaryCapability { diff --git a/crates/openshell-router/src/backend.rs b/crates/openshell-router/src/backend.rs index fbca70ae1..88a6e213a 100644 --- a/crates/openshell-router/src/backend.rs +++ b/crates/openshell-router/src/backend.rs @@ -33,7 +33,7 @@ struct ValidationProbe { protocol: &'static str, body: bytes::Bytes, /// Alternate body to try when the primary probe fails with HTTP 400. 
- /// Used for OpenAI chat completions where newer models require + /// Used for `OpenAI` chat completions where newer models require /// `max_completion_tokens` while legacy/self-hosted backends only /// accept `max_tokens`. fallback_body: Option, @@ -158,7 +158,7 @@ fn prepare_backend_request( body: bytes::Bytes, ) -> Result<(reqwest::RequestBuilder, String), RouterError> { let url = build_backend_url(&route.endpoint, path); - let headers = sanitize_request_headers(route, &headers); + let headers = sanitize_request_headers(route, headers); let reqwest_method: reqwest::Method = method .parse() @@ -353,18 +353,18 @@ pub async fn verify_backend_endpoint( // there is a fallback body, retry with the alternate token parameter. // This handles the split between `max_completion_tokens` (GPT-5+) and // `max_tokens` (legacy/self-hosted backends). - if let (Err(err), Some(fallback_body)) = (&result, probe.fallback_body) { - if err.kind == ValidationFailureKind::RequestShape { - return try_validation_request( - client, - route, - probe.path, - probe.protocol, - headers, - fallback_body, - ) - .await; - } + if let (Err(err), Some(fallback_body)) = (&result, probe.fallback_body) + && err.kind == ValidationFailureKind::RequestShape + { + return try_validation_request( + client, + route, + probe.path, + probe.protocol, + headers, + fallback_body, + ) + .await; } result @@ -768,7 +768,7 @@ mod tests { assert_eq!(validated.protocol, "openai_chat_completions"); } - /// Non-chat-completions probes (e.g. anthropic_messages) should not + /// Non-chat-completions probes (e.g. `anthropic_messages`) should not /// have a fallback — a 400 remains a hard failure. 
#[tokio::test] async fn verify_non_chat_completions_no_fallback() { diff --git a/crates/openshell-router/src/config.rs b/crates/openshell-router/src/config.rs index 660509d9e..ef5d90946 100644 --- a/crates/openshell-router/src/config.rs +++ b/crates/openshell-router/src/config.rs @@ -163,7 +163,7 @@ mod tests { #[test] fn load_from_file_valid_yaml_round_trip() { - let yaml = r#" + let yaml = r" routes: - name: inference.local endpoint: http://localhost:8000/v1 @@ -175,7 +175,7 @@ routes: model: gpt-4o protocols: [openai_chat_completions, anthropic_messages] api_key: sk-prod-key -"#; +"; let mut f = tempfile::NamedTempFile::new().unwrap(); f.write_all(yaml.as_bytes()).unwrap(); @@ -208,13 +208,13 @@ routes: #[test] fn load_from_file_missing_api_key_returns_error() { - let yaml = r#" + let yaml = r" routes: - name: inference.local endpoint: http://localhost:8000/v1 model: llama-3 protocols: [openai_chat_completions] -"#; +"; let mut f = tempfile::NamedTempFile::new().unwrap(); f.write_all(yaml.as_bytes()).unwrap(); @@ -231,15 +231,16 @@ routes: } #[test] + #[allow(unsafe_code)] // std::env::set_var/remove_var require unsafe in Rust 2024 fn load_from_file_api_key_env_resolves_from_environment() { - let yaml = r#" + let yaml = r" routes: - name: inference.local endpoint: http://localhost:8000/v1 model: llama-3 protocols: [openai_chat_completions] api_key_env: NAV_TEST_API_KEY_FOR_YAML_TEST -"#; +"; // SAFETY: this test runs single-threaded; no other thread reads this var. 
unsafe { std::env::set_var("NAV_TEST_API_KEY_FOR_YAML_TEST", "from-env") }; let mut f = tempfile::NamedTempFile::new().unwrap(); diff --git a/crates/openshell-sandbox/src/bypass_monitor.rs b/crates/openshell-sandbox/src/bypass_monitor.rs index dfa46cab1..3cef9e4d6 100644 --- a/crates/openshell-sandbox/src/bypass_monitor.rs +++ b/crates/openshell-sandbox/src/bypass_monitor.rs @@ -170,17 +170,14 @@ pub fn spawn( } }; - let stdout = match child.stdout.take() { - Some(s) => s, - None => { - let event = NetworkActivityBuilder::new(crate::ocsf_ctx()) - .activity(ActivityId::Other) - .severity(SeverityId::Low) - .message("dmesg --follow produced no stdout; bypass monitor will not run") - .build(); - ocsf_emit!(event); - return; - } + let Some(stdout) = child.stdout.take() else { + let event = NetworkActivityBuilder::new(crate::ocsf_ctx()) + .activity(ActivityId::Other) + .severity(SeverityId::Low) + .message("dmesg --follow produced no stdout; bypass monitor will not run") + .build(); + ocsf_emit!(event); + return; }; let reader = std::io::BufReader::new(stdout); diff --git a/crates/openshell-sandbox/src/child_env.rs b/crates/openshell-sandbox/src/child_env.rs index 914e06ea5..e764afdfe 100644 --- a/crates/openshell-sandbox/src/child_env.rs +++ b/crates/openshell-sandbox/src/child_env.rs @@ -5,7 +5,7 @@ use std::path::Path; const LOCAL_NO_PROXY: &str = "127.0.0.1,localhost,::1"; -pub(crate) fn proxy_env_vars(proxy_url: &str) -> [(&'static str, String); 9] { +pub fn proxy_env_vars(proxy_url: &str) -> [(&'static str, String); 9] { [ ("ALL_PROXY", proxy_url.to_owned()), ("HTTP_PROXY", proxy_url.to_owned()), @@ -21,14 +21,14 @@ pub(crate) fn proxy_env_vars(proxy_url: &str) -> [(&'static str, String); 9] { ] } -pub(crate) fn tls_env_vars( +pub fn tls_env_vars( ca_cert_path: &Path, combined_bundle_path: &Path, ) -> [(&'static str, String); 5] { let ca_cert_path = ca_cert_path.display().to_string(); let combined_bundle_path = combined_bundle_path.display().to_string(); [ - 
("NODE_EXTRA_CA_CERTS", ca_cert_path.clone()), + ("NODE_EXTRA_CA_CERTS", ca_cert_path), ("SSL_CERT_FILE", combined_bundle_path.clone()), ("REQUESTS_CA_BUNDLE", combined_bundle_path.clone()), ("CURL_CA_BUNDLE", combined_bundle_path.clone()), diff --git a/crates/openshell-sandbox/src/grpc_client.rs b/crates/openshell-sandbox/src/grpc_client.rs index 0af6476c5..68f710d8c 100644 --- a/crates/openshell-sandbox/src/grpc_client.rs +++ b/crates/openshell-sandbox/src/grpc_client.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 //! gRPC client for fetching sandbox policy, provider environment, and inference -//! route bundles from OpenShell server. +//! route bundles from `OpenShell` server. use std::collections::HashMap; use std::time::Duration; @@ -17,7 +17,7 @@ use openshell_core::proto::{ use tonic::transport::{Certificate, Channel, ClientTlsConfig, Endpoint, Identity}; use tracing::debug; -/// Create a channel to the OpenShell server. +/// Create a channel to the `OpenShell` server. /// /// When the endpoint uses `https://`, mTLS is configured using these env vars: /// - `OPENSHELL_TLS_CA` -- path to the CA certificate @@ -78,18 +78,18 @@ async fn connect_channel(endpoint: &str) -> Result { .wrap_err("failed to connect to OpenShell server") } -/// Create a channel to the OpenShell server (public for use by supervisor_session). +/// Create a channel to the `OpenShell` server (public for use by `supervisor_session`). pub async fn connect_channel_pub(endpoint: &str) -> Result { connect_channel(endpoint).await } -/// Connect to the OpenShell server (mTLS or plaintext based on endpoint scheme). +/// Connect to the `OpenShell` server (mTLS or plaintext based on endpoint scheme). async fn connect(endpoint: &str) -> Result> { let channel = connect_channel(endpoint).await?; Ok(OpenShellClient::new(channel)) } -/// Fetch sandbox policy from OpenShell server via gRPC. +/// Fetch sandbox policy from `OpenShell` server via gRPC. 
/// /// Returns `Ok(Some(policy))` when the server has a policy configured, /// or `Ok(None)` when the sandbox was created without a policy (the sandbox @@ -191,7 +191,7 @@ pub async fn sync_policy(endpoint: &str, sandbox: &str, policy: &ProtoSandboxPol sync_policy_with_client(&mut client, sandbox, policy).await } -/// Fetch provider environment variables for a sandbox from OpenShell server via gRPC. +/// Fetch provider environment variables for a sandbox from `OpenShell` server via gRPC. /// /// Returns a map of environment variable names to values derived from provider /// credentials configured on the sandbox. Returns an empty map if the sandbox @@ -214,7 +214,7 @@ pub async fn fetch_provider_environment( Ok(response.into_inner().environment) } -/// A reusable gRPC client for the OpenShell service. +/// A reusable gRPC client for the `OpenShell` service. /// /// Wraps a tonic channel connected once and reused for policy polling /// and status reporting, avoiding per-request TLS handshake overhead. @@ -231,7 +231,7 @@ pub struct SettingsPollResult { pub config_revision: u64, pub policy_source: PolicySource, /// Effective settings keyed by name. - pub settings: std::collections::HashMap, + pub settings: HashMap, /// When `policy_source` is `Global`, the version of the global policy revision. pub global_policy_version: u32, } diff --git a/crates/openshell-sandbox/src/l7/mod.rs b/crates/openshell-sandbox/src/l7/mod.rs index 3b26f8122..3e0ee2cd3 100644 --- a/crates/openshell-sandbox/src/l7/mod.rs +++ b/crates/openshell-sandbox/src/l7/mod.rs @@ -144,7 +144,7 @@ pub fn parse_l7_config(val: ®orus::Value) -> Option { pub fn parse_tls_mode(val: ®orus::Value) -> TlsMode { match get_object_str(val, "tls").as_deref() { Some("skip") => TlsMode::Skip, - Some("terminate") | Some("passthrough") => TlsMode::Auto, // deprecation logged by parse_l7_config + // "terminate" and "passthrough" are deprecated aliases (logged by parse_l7_config); fall through to Auto. 
_ => TlsMode::Auto, } } @@ -252,17 +252,16 @@ pub fn validate_l7_policies(data_json: &serde_json::Value) -> (Vec, Vec< let host = ep.get("host").and_then(|v| v.as_str()).unwrap_or(""); // Read ports from either "ports" array or scalar "port". - let ports: Vec = ep - .get("ports") - .and_then(|v| v.as_array()) - .map(|arr| arr.iter().filter_map(|v| v.as_u64()).collect()) - .unwrap_or_else(|| { + let ports: Vec = ep.get("ports").and_then(|v| v.as_array()).map_or_else( + || { ep.get("port") .and_then(serde_json::Value::as_u64) .filter(|p| *p > 0) .into_iter() .collect() - }); + }, + |arr| arr.iter().filter_map(serde_json::Value::as_u64).collect(), + ); let loc = format!("{name}.endpoints[{i}]"); // Validate host wildcard patterns. @@ -392,10 +391,10 @@ pub fn validate_l7_policies(data_json: &serde_json::Value) -> (Vec, Vec< } // Validate path glob syntax - if let Some(path) = deny_rule.get("path").and_then(|p| p.as_str()) { - if let Some(warning) = check_glob_syntax(path) { - warnings.push(format!("{deny_loc}.path: {warning}")); - } + if let Some(path) = deny_rule.get("path").and_then(|p| p.as_str()) + && let Some(warning) = check_glob_syntax(path) + { + warnings.push(format!("{deny_loc}.path: {warning}")); } // Validate query matchers — mirrors allow-side validation exactly @@ -498,12 +497,12 @@ pub fn validate_l7_policies(data_json: &serde_json::Value) -> (Vec, Vec< } // SQL command validation - if let Some(command) = deny_rule.get("command").and_then(|c| c.as_str()) { - if !command.is_empty() && protocol == "rest" { - warnings.push(format!( - "{deny_loc}: command is for SQL protocol, not REST" - )); - } + if let Some(command) = deny_rule.get("command").and_then(|c| c.as_str()) + && !command.is_empty() + && protocol == "rest" + { + warnings + .push(format!("{deny_loc}: command is for SQL protocol, not REST")); } } } diff --git a/crates/openshell-sandbox/src/l7/path.rs b/crates/openshell-sandbox/src/l7/path.rs index 1b425b5f4..db2e4e984 100644 --- 
a/crates/openshell-sandbox/src/l7/path.rs +++ b/crates/openshell-sandbox/src/l7/path.rs @@ -136,15 +136,12 @@ pub fn canonicalize_request_target( }; // 4. Handle absolute-form by stripping scheme://authority. - let raw_path = if let Some(idx) = path_part.find("://") { + let raw_path = path_part.find("://").map_or(path_part, |idx| { let after_scheme = &path_part[idx + 3..]; - match after_scheme.find('/') { - Some(slash) => &after_scheme[slash..], - None => "/", - } - } else { - path_part - }; + after_scheme + .find('/') + .map_or("/", |slash| &after_scheme[slash..]) + }); // 5. Empty is equivalent to "/". let raw_path = if raw_path.is_empty() { "/" } else { raw_path }; @@ -170,7 +167,7 @@ pub fn canonicalize_request_target( // 10. Reconstruct. Strip `;params` per segment if requested; re-encode // any byte that must be percent-encoded in the pchar set. - let canonical = build_canonical_path(&resolved, decoded.last().copied() == Some(b'/'), opts); + let canonical = build_canonical_path(&resolved, decoded.last().copied() == Some(b'/'), *opts); let rewritten = canonical != raw_path; Ok(( @@ -270,7 +267,7 @@ fn resolve_dot_segments(segments: Vec<&[u8]>) -> Result>, Canonicali fn build_canonical_path( segments: &[Vec], _trailing_slash_hint: bool, - opts: &CanonicalizeOptions, + opts: CanonicalizeOptions, ) -> String { let mut out = String::from("/"); for (idx, seg) in segments.iter().enumerate() { diff --git a/crates/openshell-sandbox/src/l7/relay.rs b/crates/openshell-sandbox/src/l7/relay.rs index a0e54062d..02636c713 100644 --- a/crates/openshell-sandbox/src/l7/relay.rs +++ b/crates/openshell-sandbox/src/l7/relay.rs @@ -17,7 +17,7 @@ use openshell_ocsf::{ }; use std::sync::{Arc, Mutex}; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use tracing::{debug, info, warn}; +use tracing::{debug, warn}; /// Context for L7 request policy evaluation. 
pub struct L7EvalContext { @@ -218,13 +218,8 @@ where // Uses redacted_target (path only, no query params) to avoid logging secrets. { let (action_id, disposition_id, severity) = match decision_str { - "allow" => ( - ActionId::Allowed, - DispositionId::Allowed, - SeverityId::Informational, - ), "deny" => (ActionId::Denied, DispositionId::Blocked, SeverityId::Medium), - "audit" => ( + "allow" | "audit" => ( ActionId::Allowed, DispositionId::Allowed, SeverityId::Informational, diff --git a/crates/openshell-sandbox/src/l7/rest.rs b/crates/openshell-sandbox/src/l7/rest.rs index 4d8909b9e..737190e2c 100644 --- a/crates/openshell-sandbox/src/l7/rest.rs +++ b/crates/openshell-sandbox/src/l7/rest.rs @@ -12,7 +12,7 @@ use crate::secrets::rewrite_http_header_block; use miette::{IntoDiagnostic, Result, miette}; use std::collections::HashMap; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use tracing::{debug, warn}; +use tracing::debug; const MAX_HEADER_BYTES: usize = 16384; // 16 KiB for HTTP headers const RELAY_BUF_SIZE: usize = 8192; @@ -233,6 +233,8 @@ fn rewrite_request_line_target( Ok(out) } +// Used only in tests; kept as a `pub(crate)` helper for clarity. 
+#[allow(dead_code)] pub(crate) fn parse_target_query(target: &str) -> Result<(String, HashMap>)> { match target.split_once('?') { Some((path, query)) => Ok((path.to_string(), parse_query_params(query)?)), @@ -475,12 +477,12 @@ fn parse_body_length(headers: &str) -> Result { let len: u64 = val .parse() .map_err(|_| miette!("Request contains invalid Content-Length value"))?; - if let Some(prev) = cl_value { - if prev != len { - return Err(miette!( - "Request contains multiple Content-Length headers with differing values ({prev} vs {len})" - )); - } + if let Some(prev) = cl_value + && prev != len + { + return Err(miette!( + "Request contains multiple Content-Length headers with differing values ({prev} vs {len})" + )); } cl_value = Some(len); } @@ -652,6 +654,9 @@ fn find_crlf(buf: &[u8], start: usize) -> Option { /// /// Note: callers that receive `Upgraded` are responsible for switching to /// raw bidirectional relay and forwarding the overflow bytes. +// Public helper retained as part of the relay API surface; internal callers +// currently use `relay_response` directly. +#[allow(dead_code)] pub(crate) async fn relay_response_to_client( upstream: &mut U, client: &mut C, @@ -931,6 +936,13 @@ fn is_benign_close(err: &std::io::Error) -> bool { } #[cfg(test)] +#[allow( + clippy::iter_on_single_items, + clippy::manual_string_new, + clippy::collapsible_if, + clippy::cast_possible_truncation, + reason = "Test code: test fixtures and explicit value-shape assertions are idiomatic in tests." +)] mod tests { use super::*; use crate::secrets::SecretResolver; diff --git a/crates/openshell-sandbox/src/l7/tls.rs b/crates/openshell-sandbox/src/l7/tls.rs index a11674da1..70e198f42 100644 --- a/crates/openshell-sandbox/src/l7/tls.rs +++ b/crates/openshell-sandbox/src/l7/tls.rs @@ -313,10 +313,10 @@ pub fn parse_pem_certs(path: &Path) -> Result>> { } /// Peek the first bytes of a stream and determine if it looks like a TLS -/// ClientHello handshake. +/// `ClientHello` handshake. 
/// /// A TLS record starts with: -/// - byte 0: `0x16` (ContentType::Handshake) +/// - byte 0: `0x16` (`ContentType::Handshake`) /// - bytes 1-2: TLS version (0x0301 = TLS 1.0, 0x0302 = TLS 1.1, 0x0303 = TLS 1.2/1.3) /// /// Returns `true` if the peeked bytes match the TLS handshake pattern. diff --git a/crates/openshell-sandbox/src/lib.rs b/crates/openshell-sandbox/src/lib.rs index 262859c33..f4e87fffa 100644 --- a/crates/openshell-sandbox/src/lib.rs +++ b/crates/openshell-sandbox/src/lib.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! OpenShell Sandbox library. +//! `OpenShell` Sandbox library. //! //! This crate provides process sandboxing and monitoring capabilities. @@ -133,27 +133,27 @@ fn disable_inference_on_empty_routes(source: InferenceRouteSource) -> bool { } fn route_refresh_interval_secs() -> u64 { - match std::env::var("OPENSHELL_ROUTE_REFRESH_INTERVAL_SECS") { - Ok(value) => match value.parse::() { - Ok(interval) if interval > 0 => interval, - Ok(_) => { - warn!( - default_interval_secs = DEFAULT_ROUTE_REFRESH_INTERVAL_SECS, - "Ignoring zero route refresh interval" - ); - DEFAULT_ROUTE_REFRESH_INTERVAL_SECS - } - Err(error) => { - warn!( - interval = %value, - error = %error, - default_interval_secs = DEFAULT_ROUTE_REFRESH_INTERVAL_SECS, - "Ignoring invalid route refresh interval" - ); - DEFAULT_ROUTE_REFRESH_INTERVAL_SECS - } - }, - Err(_) => DEFAULT_ROUTE_REFRESH_INTERVAL_SECS, + let Ok(value) = std::env::var("OPENSHELL_ROUTE_REFRESH_INTERVAL_SECS") else { + return DEFAULT_ROUTE_REFRESH_INTERVAL_SECS; + }; + match value.parse::() { + Ok(interval) if interval > 0 => interval, + Ok(_) => { + warn!( + default_interval_secs = DEFAULT_ROUTE_REFRESH_INTERVAL_SECS, + "Ignoring zero route refresh interval" + ); + DEFAULT_ROUTE_REFRESH_INTERVAL_SECS + } + Err(error) => { + warn!( + interval = %value, + error = %error, + 
default_interval_secs = DEFAULT_ROUTE_REFRESH_INTERVAL_SECS, + "Ignoring invalid route refresh interval" + ); + DEFAULT_ROUTE_REFRESH_INTERVAL_SECS + } } } @@ -227,9 +227,10 @@ pub async fn run_sandbox( // Proxy IP/port use defaults here; they are only significant for network // events which happen after the netns is created. { - let hostname = std::fs::read_to_string("/etc/hostname") - .map(|s| s.trim().to_string()) - .unwrap_or_else(|_| "openshell-sandbox".to_string()); + let hostname = std::fs::read_to_string("/etc/hostname").map_or_else( + |_| "openshell-sandbox".to_string(), + |s| s.trim().to_string(), + ); if OCSF_CTX .set(SandboxContext { @@ -903,6 +904,9 @@ pub async fn run_sandbox( /// wins and the cluster bundle is not fetched. /// /// Returns `None` if neither source is configured (inference routing disabled). +// `routes`/`router` are intentionally distinct nouns (the route list vs the +// router that consumes them); both names are clearer than alternatives. +#[allow(clippy::similar_names)] async fn build_inference_context( sandbox_id: Option<&str>, openshell_endpoint: Option<&str>, @@ -1429,6 +1433,14 @@ fn enrich_sandbox_baseline_paths(policy: &mut SandboxPolicy) { } #[cfg(test)] +#[allow( + clippy::needless_raw_string_hashes, + clippy::iter_on_single_items, + clippy::similar_names, + clippy::manual_string_new, + clippy::doc_markdown, + reason = "Test code: test fixtures often use idiomatic forms not flagged in production." +)] mod baseline_tests { use super::*; use crate::policy::{FilesystemPolicy, LandlockPolicy, ProcessPolicy}; @@ -1692,55 +1704,51 @@ async fn load_policy( let proto_policy = grpc_retry("Policy fetch", || grpc_client::fetch_policy(endpoint, id)).await?; - let mut proto_policy = match proto_policy { - Some(p) => p, - None => { - // No policy configured on the server. Discover from disk or - // fall back to the restrictive default, then sync to the - // gateway so it becomes the authoritative baseline. 
- ocsf_emit!( - ConfigStateChangeBuilder::new(ocsf_ctx()) - .severity(SeverityId::Informational) - .status(StatusId::Success) - .state(StateId::Other, "discovery") - .message("Server returned no policy; attempting local discovery") - .build() - ); - let mut discovered = discover_policy_from_disk_or_default(); - // Enrich before syncing so the gateway baseline includes - // baseline paths from the start. - enrich_proto_baseline_paths(&mut discovered); - let sandbox = sandbox.as_deref().ok_or_else(|| { - miette::miette!( - "Cannot sync discovered policy: sandbox not available.\n\ - Set OPENSHELL_SANDBOX or --sandbox to enable policy sync." - ) - })?; + let mut proto_policy = if let Some(p) = proto_policy { + p + } else { + // No policy configured on the server. Discover from disk or + // fall back to the restrictive default, then sync to the + // gateway so it becomes the authoritative baseline. + ocsf_emit!( + ConfigStateChangeBuilder::new(ocsf_ctx()) + .severity(SeverityId::Informational) + .status(StatusId::Success) + .state(StateId::Other, "discovery") + .message("Server returned no policy; attempting local discovery") + .build() + ); + let mut discovered = discover_policy_from_disk_or_default(); + // Enrich before syncing so the gateway baseline includes + // baseline paths from the start. + enrich_proto_baseline_paths(&mut discovered); + let sandbox = sandbox.as_deref().ok_or_else(|| { + miette::miette!( + "Cannot sync discovered policy: sandbox not available.\n\ + Set OPENSHELL_SANDBOX or --sandbox to enable policy sync." + ) + })?; - // Sync and re-fetch over a single connection to avoid extra - // TLS handshakes. - grpc_retry("Policy discovery sync", || { - grpc_client::discover_and_sync_policy(endpoint, id, sandbox, &discovered) - }) - .await? - } + // Sync and re-fetch over a single connection to avoid extra + // TLS handshakes. 
+ grpc_retry("Policy discovery sync", || { + grpc_client::discover_and_sync_policy(endpoint, id, sandbox, &discovered) + }) + .await? }; // Ensure baseline filesystem paths are present for proxy-mode // sandboxes. If the policy was enriched, sync the updated version // back to the gateway so users can see the effective policy. let enriched = enrich_proto_baseline_paths(&mut proto_policy); - if enriched { - if let Some(sandbox_name) = sandbox.as_deref() { - if let Err(e) = - grpc_client::sync_policy(endpoint, sandbox_name, &proto_policy).await - { - warn!( - error = %e, - "Failed to sync enriched policy back to gateway (non-fatal)" - ); - } - } + if enriched + && let Some(sandbox_name) = sandbox.as_deref() + && let Err(e) = grpc_client::sync_policy(endpoint, sandbox_name, &proto_policy).await + { + warn!( + error = %e, + "Failed to sync enriched policy back to gateway (non-fatal)" + ); } // Build OPA engine from baked-in rules + typed proto data. @@ -1802,76 +1810,71 @@ fn discover_policy_from_path(path: &std::path::Path) -> openshell_core::proto::S parse_sandbox_policy, restrictive_default_policy, validate_sandbox_policy, }; - match std::fs::read_to_string(path) { - Ok(yaml) => { - ocsf_emit!( - ConfigStateChangeBuilder::new(ocsf_ctx()) - .severity(SeverityId::Informational) - .status(StatusId::Success) - .state(StateId::Enabled, "loaded") + let Ok(yaml) = std::fs::read_to_string(path) else { + ocsf_emit!( + ConfigStateChangeBuilder::new(ocsf_ctx()) + .severity(SeverityId::Informational) + .status(StatusId::Success) + .state(StateId::Enabled, "default") + .message(format!( + "No policy file on disk, using restrictive default [path:{}]", + path.display() + )) + .build() + ); + return restrictive_default_policy(); + }; + ocsf_emit!( + ConfigStateChangeBuilder::new(ocsf_ctx()) + .severity(SeverityId::Informational) + .status(StatusId::Success) + .state(StateId::Enabled, "loaded") + .message(format!( + "Loaded sandbox policy from container disk [path:{}]", + 
path.display() + )) + .build() + ); + match parse_sandbox_policy(&yaml) { + Ok(policy) => { + // Validate the disk-loaded policy for safety. + if let Err(violations) = validate_sandbox_policy(&policy) { + let messages: Vec = violations.iter().map(ToString::to_string).collect(); + ocsf_emit!(DetectionFindingBuilder::new(ocsf_ctx()) + .activity(ActivityId::Open) + .severity(SeverityId::Medium) + .action(ActionId::Denied) + .disposition(DispositionId::Blocked) + .finding_info( + FindingInfo::new( + "unsafe-disk-policy", + "Unsafe Disk Policy Content", + ) + .with_desc(&format!( + "Disk policy at {} contains unsafe content: {}", + path.display(), + messages.join("; "), + )), + ) .message(format!( - "Loaded sandbox policy from container disk [path:{}]", + "Disk policy contains unsafe content, using restrictive default [path:{}]", path.display() )) - .build() - ); - match parse_sandbox_policy(&yaml) { - Ok(policy) => { - // Validate the disk-loaded policy for safety. - if let Err(violations) = validate_sandbox_policy(&policy) { - let messages: Vec = - violations.iter().map(ToString::to_string).collect(); - ocsf_emit!(DetectionFindingBuilder::new(ocsf_ctx()) - .activity(ActivityId::Open) - .severity(SeverityId::Medium) - .action(ActionId::Denied) - .disposition(DispositionId::Blocked) - .finding_info( - FindingInfo::new( - "unsafe-disk-policy", - "Unsafe Disk Policy Content", - ) - .with_desc(&format!( - "Disk policy at {} contains unsafe content: {}", - path.display(), - messages.join("; "), - )), - ) - .message(format!( - "Disk policy contains unsafe content, using restrictive default [path:{}]", - path.display() - )) - .build()); - return restrictive_default_policy(); - } - policy - } - Err(e) => { - ocsf_emit!(ConfigStateChangeBuilder::new(ocsf_ctx()) - .severity(SeverityId::Medium) - .status(StatusId::Failure) - .state(StateId::Other, "fallback") - .message(format!( - "Failed to parse disk policy, using restrictive default [path:{} error:{e}]", - path.display() - )) 
- .build()); - restrictive_default_policy() - } + .build()); + return restrictive_default_policy(); } + policy } - Err(_) => { - ocsf_emit!( - ConfigStateChangeBuilder::new(ocsf_ctx()) - .severity(SeverityId::Informational) - .status(StatusId::Success) - .state(StateId::Enabled, "default") - .message(format!( - "No policy file on disk, using restrictive default [path:{}]", - path.display() - )) - .build() - ); + Err(e) => { + ocsf_emit!(ConfigStateChangeBuilder::new(ocsf_ctx()) + .severity(SeverityId::Medium) + .status(StatusId::Failure) + .state(StateId::Other, "fallback") + .message(format!( + "Failed to parse disk policy, using restrictive default [path:{} error:{e}]", + path.display() + )) + .build()); restrictive_default_policy() } } @@ -2036,7 +2039,7 @@ async fn flush_proposals_to_gateway( .map(|s| DenialSummary { sandbox_id: String::new(), host: s.host, - port: s.port as u32, + port: u32::from(s.port), binary: s.binary, ancestors: s.ancestors, deny_reason: s.deny_reason, @@ -2202,13 +2205,13 @@ async fn run_policy_poll_loop( .build() ); } - if result.version > 0 && result.policy_source == PolicySource::Sandbox { - if let Err(e) = client + if result.version > 0 + && result.policy_source == PolicySource::Sandbox + && let Err(e) = client .report_policy_status(sandbox_id, result.version, true, "") .await - { - warn!(error = %e, "Failed to report policy load success"); - } + { + warn!(error = %e, "Failed to report policy load success"); } } Err(e) => { @@ -2223,13 +2226,13 @@ async fn run_policy_poll_loop( result.version )) .build()); - if result.version > 0 && result.policy_source == PolicySource::Sandbox { - if let Err(report_err) = client + if result.version > 0 + && result.policy_source == PolicySource::Sandbox + && let Err(report_err) = client .report_policy_status(sandbox_id, result.version, false, &e.to_string()) .await - { - warn!(error = %report_err, "Failed to report policy load failure"); - } + { + warn!(error = %report_err, "Failed to report policy 
load failure"); } } } @@ -2281,8 +2284,8 @@ fn log_setting_changes( .status(StatusId::Success) .state(StateId::Enabled, "updated") .unmapped("key", serde_json::json!(key)) - .unmapped("old", serde_json::json!(old_val.to_string())) - .unmapped("new", serde_json::json!(new_val.to_string())) + .unmapped("old", serde_json::json!(old_val.clone())) + .unmapped("new", serde_json::json!(new_val.clone())) .message(format!( "Setting changed [key:{key} old:{old_val} new:{new_val}]" )) @@ -2297,7 +2300,7 @@ fn log_setting_changes( .status(StatusId::Success) .state(StateId::Enabled, "enabled") .unmapped("key", serde_json::json!(key)) - .unmapped("value", serde_json::json!(new_val.to_string())) + .unmapped("value", serde_json::json!(new_val.clone())) .message(format!("Setting added [key:{key} value:{new_val}]")) .build() ); @@ -2332,6 +2335,14 @@ fn format_setting_value(es: &openshell_core::proto::EffectiveSetting) -> String } #[cfg(test)] +#[allow( + clippy::needless_raw_string_hashes, + clippy::iter_on_single_items, + clippy::similar_names, + clippy::manual_string_new, + clippy::doc_markdown, + reason = "Test code: test fixtures often use idiomatic forms not flagged in production." +)] mod tests { use super::*; use crate::policy::{FilesystemPolicy, LandlockPolicy, ProcessPolicy}; diff --git a/crates/openshell-sandbox/src/log_push.rs b/crates/openshell-sandbox/src/log_push.rs index 17f9bcc3d..8e053f79f 100644 --- a/crates/openshell-sandbox/src/log_push.rs +++ b/crates/openshell-sandbox/src/log_push.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! Push sandbox tracing events to the OpenShell server via gRPC. +//! Push sandbox tracing events to the `OpenShell` server via gRPC. //! //! A [`tracing`] layer captures log events and sends them through an mpsc //! channel to a background task. 
The task batches lines and streams them to @@ -15,7 +15,7 @@ use tracing::{Event, Subscriber}; use tracing_subscriber::Layer; use tracing_subscriber::layer::Context; -/// Tracing layer that pushes log events to the OpenShell server. +/// Tracing layer that pushes log events to the `OpenShell` server. /// /// Events are sent best-effort via `try_send` — if the channel is full the /// event is dropped. Logging must never block the sandbox. @@ -248,7 +248,7 @@ async fn drain_during_backoff( let deadline = tokio::time::Instant::now() + delay; loop { tokio::select! { - _ = tokio::time::sleep_until(deadline) => { return; } + () = tokio::time::sleep_until(deadline) => { return; } line = rx.recv() => { match line { Some(l) => { diff --git a/crates/openshell-sandbox/src/main.rs b/crates/openshell-sandbox/src/main.rs index c7aa41a8f..a62129582 100644 --- a/crates/openshell-sandbox/src/main.rs +++ b/crates/openshell-sandbox/src/main.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -//! OpenShell Sandbox - process sandbox and monitor. +//! `OpenShell` Sandbox - process sandbox and monitor. use std::sync::Arc; use std::sync::atomic::AtomicBool; @@ -16,7 +16,7 @@ use tracing_subscriber::{Layer, layer::SubscriberExt, util::SubscriberInitExt}; use openshell_sandbox::run_sandbox; -/// OpenShell Sandbox - process isolation and monitoring. +/// `OpenShell` Sandbox - process isolation and monitoring. #[derive(Parser, Debug)] #[command(name = "openshell-sandbox")] #[command(version = openshell_core::VERSION)] @@ -40,7 +40,7 @@ struct Args { #[arg(long, short = 'i')] interactive: bool, - /// Sandbox ID for fetching policy via gRPC from OpenShell server. + /// Sandbox ID for fetching policy via gRPC from `OpenShell` server. /// Requires --openshell-endpoint to be set. 
#[arg(long, env = "OPENSHELL_SANDBOX_ID")] sandbox_id: Option, @@ -50,7 +50,7 @@ struct Args { #[arg(long, env = "OPENSHELL_SANDBOX")] sandbox: Option, - /// OpenShell server gRPC endpoint for fetching policy. + /// `OpenShell` server gRPC endpoint for fetching policy. /// Required when using --sandbox-id. #[arg(long, env = "OPENSHELL_ENDPOINT")] openshell_endpoint: Option, diff --git a/crates/openshell-sandbox/src/mechanistic_mapper.rs b/crates/openshell-sandbox/src/mechanistic_mapper.rs index 3cfdf3f54..825353b52 100644 --- a/crates/openshell-sandbox/src/mechanistic_mapper.rs +++ b/crates/openshell-sandbox/src/mechanistic_mapper.rs @@ -178,13 +178,13 @@ pub async fn generate_proposals(summaries: &[DenialSummary]) -> Vec .map(|(_, name)| format!(" ({name})")) .unwrap_or_default(); - let private_ip_note = if !allowed_ips.is_empty() { + let private_ip_note = if allowed_ips.is_empty() { + String::new() + } else { format!( " Host resolves to private IP ({}); allowed_ips included for SSRF override.", allowed_ips.join(", ") ) - } else { - String::new() }; // Note: hit_count in the DB accumulates across flush cycles, so we @@ -226,7 +226,7 @@ pub async fn generate_proposals(summaries: &[DenialSummary]) -> Vec decided_at_ms: 0, stage, supersedes_chunk_id: String::new(), - hit_count: total_count as i32, + hit_count: total_count.cast_signed(), first_seen_ms, last_seen_ms, binary: binary.clone(), @@ -336,16 +336,15 @@ fn generate_security_notes(host: &str, port: u16, is_ssrf: bool) -> String { /// Falls back to the exact observed path when no pattern applies. fn build_l7_rules(samples: &HashMap<(String, String), u32>) -> Vec { // Deduplicate after generalisation. 
- let mut seen: HashMap<(String, String), ()> = HashMap::new(); + let mut seen: std::collections::HashSet<(String, String)> = std::collections::HashSet::new(); let mut rules = Vec::new(); for (method, path) in samples.keys() { let generalised = generalise_path(path); let key = (method.clone(), generalised.clone()); - if seen.contains_key(&key) { + if !seen.insert(key) { continue; } - seen.insert(key, ()); rules.push(L7Rule { allow: Some(L7Allow { @@ -405,7 +404,7 @@ fn looks_like_id(segment: &str) -> bool { return true; } // UUID-ish (contains dashes, 32+ hex chars) - let hex_only: String = segment.chars().filter(|c| c.is_ascii_hexdigit()).collect(); + let hex_only: String = segment.chars().filter(char::is_ascii_hexdigit).collect(); if hex_only.len() >= 24 && segment.contains('-') { return true; } @@ -450,10 +449,11 @@ async fn resolve_allowed_ips_if_private(host: &str, port: u32) -> Vec { let addrs = match tokio::net::lookup_host(&addr).await { Ok(addrs) => addrs.collect::>(), Err(e) => { + let port_u16 = u16::try_from(port).unwrap_or(u16::MAX); let event = openshell_ocsf::NetworkActivityBuilder::new(crate::ocsf_ctx()) .activity(openshell_ocsf::ActivityId::Fail) .severity(openshell_ocsf::SeverityId::Low) - .dst_endpoint(openshell_ocsf::Endpoint::from_domain(host, port as u16)) + .dst_endpoint(openshell_ocsf::Endpoint::from_domain(host, port_u16)) .message(format!("DNS resolution failed for allowed_ips check: {e}")) .build(); openshell_ocsf::ocsf_emit!(event); @@ -462,10 +462,11 @@ async fn resolve_allowed_ips_if_private(host: &str, port: u32) -> Vec { }; if addrs.is_empty() { + let port_u16 = u16::try_from(port).unwrap_or(u16::MAX); let event = openshell_ocsf::NetworkActivityBuilder::new(crate::ocsf_ctx()) .activity(openshell_ocsf::ActivityId::Fail) .severity(openshell_ocsf::SeverityId::Low) - .dst_endpoint(openshell_ocsf::Endpoint::from_domain(host, port as u16)) + .dst_endpoint(openshell_ocsf::Endpoint::from_domain(host, port_u16)) .message(format!( "DNS 
resolution returned no addresses for {host}:{port}" )) diff --git a/crates/openshell-sandbox/src/opa.rs b/crates/openshell-sandbox/src/opa.rs index 0069bcc3e..1e2860d00 100644 --- a/crates/openshell-sandbox/src/opa.rs +++ b/crates/openshell-sandbox/src/opa.rs @@ -137,7 +137,7 @@ impl OpaEngine { .severity(openshell_ocsf::SeverityId::Medium) .status(openshell_ocsf::StatusId::Success) .state(openshell_ocsf::StateId::Enabled, "validated") - .unmapped("warning", serde_json::json!(w.to_string())) + .unmapped("warning", serde_json::json!(w.clone())) .message(format!("L7 policy validation warning: {w}")) .build() ); @@ -278,15 +278,14 @@ impl OpaEngine { Some(value_to_string(&matched)) }; - match action_str.as_str() { - "allow" => Ok(NetworkAction::Allow { matched_policy }), - _ => { - let reason_val = engine - .eval_rule("data.openshell.sandbox.deny_reason".into()) - .map_err(|e| miette::miette!("{e}"))?; - let reason = value_to_string(&reason_val); - Ok(NetworkAction::Deny { reason }) - } + if action_str == "allow" { + Ok(NetworkAction::Allow { matched_policy }) + } else { + let reason_val = engine + .eval_rule("data.openshell.sandbox.deny_reason".into()) + .map_err(|e| miette::miette!("{e}"))?; + let reason = value_to_string(&reason_val); + Ok(NetworkAction::Deny { reason }) } } @@ -435,10 +434,10 @@ impl OpaEngine { /// match. This is used by the proxy to decide between full SSRF blocking /// and allowlist-based IP validation. pub fn query_allowed_ips(&self, input: &NetworkInput) -> Result> { - match self.query_endpoint_config(input)? { - Some(val) => Ok(get_str_array(&val, "allowed_ips")), - None => Ok(vec![]), - } + Ok(self + .query_endpoint_config(input)? + .map(|val| get_str_array(&val, "allowed_ips")) + .unwrap_or_default()) } /// Clone the inner regorus engine for per-tunnel L7 evaluation. 
@@ -557,7 +556,7 @@ fn preprocess_yaml_data(yaml_str: &str) -> Result { .severity(openshell_ocsf::SeverityId::Medium) .status(openshell_ocsf::StatusId::Success) .state(openshell_ocsf::StateId::Enabled, "validated") - .unmapped("warning", serde_json::json!(w.to_string())) + .unmapped("warning", serde_json::json!(w.clone())) .message(format!("L7 policy validation warning: {w}")) .build() ); @@ -594,9 +593,8 @@ fn normalize_endpoint_ports(data: &mut serde_json::Value) { }; for ep in endpoints.iter_mut() { - let ep_obj = match ep.as_object_mut() { - Some(obj) => obj, - None => continue, + let Some(ep_obj) = ep.as_object_mut() else { + continue; }; // If "ports" already exists and is non-empty, keep it. @@ -637,10 +635,12 @@ fn normalize_endpoint_ports(data: &mut serde_json::Value) { /// - Path is not a symlink /// - Resolution fails (binary doesn't exist in container) /// - Resolved path equals the original +/// /// Normalize a path by resolving `.` and `..` components without touching /// the filesystem. Only works correctly for absolute paths. -fn normalize_path(path: &std::path::Path) -> std::path::PathBuf { - let mut result = std::path::PathBuf::new(); +#[cfg(test)] +fn normalize_path(path: &Path) -> PathBuf { + let mut result = PathBuf::new(); for component in path.components() { match component { std::path::Component::ParentDir => { @@ -946,6 +946,13 @@ fn proto_to_opa_data_json(proto: &ProtoSandboxPolicy, entrypoint_pid: u32) -> St } #[cfg(test)] +#[allow( + clippy::needless_raw_string_hashes, + clippy::similar_names, + clippy::doc_markdown, + clippy::match_wildcard_for_single_variants, + reason = "Test code: test fixtures and panic-on-unexpected matches are idiomatic in tests." 
+)] mod tests { use super::*; diff --git a/crates/openshell-sandbox/src/process.rs b/crates/openshell-sandbox/src/process.rs index 85a57b4e7..b491b19ff 100644 --- a/crates/openshell-sandbox/src/process.rs +++ b/crates/openshell-sandbox/src/process.rs @@ -35,13 +35,13 @@ fn scrub_sensitive_env(cmd: &mut Command) { } #[cfg(unix)] -#[allow(unsafe_code)] -pub(crate) fn harden_child_process() -> Result<()> { +#[allow(unsafe_code, clippy::borrow_as_ptr)] +pub fn harden_child_process() -> Result<()> { let core_limit = libc::rlimit { rlim_cur: 0, rlim_max: 0, }; - let rc = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &core_limit) }; + let rc = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &raw const core_limit) }; if rc != 0 { return Err(miette::miette!( "Failed to disable core dumps: {}", @@ -57,7 +57,7 @@ pub(crate) fn harden_child_process() -> Result<()> { rlim_cur: 512, rlim_max: 512, }; - let rc = unsafe { libc::setrlimit(libc::RLIMIT_NPROC, &nproc_limit) }; + let rc = unsafe { libc::setrlimit(libc::RLIMIT_NPROC, &raw const nproc_limit) }; if rc != 0 { return Err(miette::miette!( "Failed to set RLIMIT_NPROC: {}", @@ -427,7 +427,10 @@ impl Drop for ProcessHandle { } } +// `effective_gid`/`effective_uid` are intentionally parallel names (same role +// for different identifiers) and the noise from renaming would obscure intent. 
#[cfg(unix)] +#[allow(clippy::similar_names)] pub fn drop_privileges(policy: &SandboxPolicy) -> Result<()> { let user_name = match policy.process.run_as_user.as_deref() { Some(name) if !name.is_empty() => Some(name), @@ -740,11 +743,7 @@ mod tests { let written = unsafe { libc::write(fds[1], bytes.as_ptr().cast(), bytes.len()) }; unsafe { libc::close(fds[1]); - libc::_exit(if written == bytes.len() as isize { - 0 - } else { - 1 - }); + libc::_exit(i32::from(written != bytes.len().cast_signed())); } } ForkResult::Parent { child } => { @@ -753,7 +752,7 @@ mod tests { let read = unsafe { libc::read(fds[0], bytes.as_mut_ptr().cast(), bytes.len()) }; unsafe { libc::close(fds[0]) }; assert_eq!( - read as usize, + read.cast_unsigned(), bytes.len(), "expected {} probe bytes, got {}", bytes.len(), diff --git a/crates/openshell-sandbox/src/procfs.rs b/crates/openshell-sandbox/src/procfs.rs index 9a6d3f9dd..f831615c1 100644 --- a/crates/openshell-sandbox/src/procfs.rs +++ b/crates/openshell-sandbox/src/procfs.rs @@ -480,7 +480,7 @@ pub fn file_sha256(path: &Path) -> Result { let mut file = std::fs::File::open(path) .map_err(|e| miette::miette!("Failed to open {}: {e}", path.display()))?; let mut hasher = Sha256::new(); - let mut buf = [0u8; 65536]; + let mut buf = vec![0u8; 65536].into_boxed_slice(); let mut total_read = 0u64; loop { let n = file diff --git a/crates/openshell-sandbox/src/proxy.rs b/crates/openshell-sandbox/src/proxy.rs index 69ff97b04..f8df885d5 100644 --- a/crates/openshell-sandbox/src/proxy.rs +++ b/crates/openshell-sandbox/src/proxy.rs @@ -79,6 +79,9 @@ pub struct InferenceContext { } impl InferenceContext { + // `router`/`routes` are intentionally distinct nouns (the router and the + // route list it consumes); both names are clearer than alternatives. 
+ #[allow(clippy::similar_names)] pub fn new( patterns: Vec, router: openshell_router::Router, @@ -295,6 +298,10 @@ fn emit_denial_simple( } } +// Many distinct, non-related context parameters are required for a CONNECT +// dispatch; bundling them into a struct would just shift the noise into call +// sites. +#[allow(clippy::too_many_arguments)] async fn handle_tcp_connection( mut client: TcpStream, opa_engine: Arc, @@ -497,6 +504,9 @@ async fn handle_tcp_connection( // Defense-in-depth: resolve DNS and reject connections to internal IPs. let dns_connect_start = std::time::Instant::now(); + // The "non-empty" branch is the explicit-allowlist path; reading it first + // matches the policy decision narrative. + #[allow(clippy::if_not_else)] let mut upstream = if !raw_allowed_ips.is_empty() { // allowed_ips mode: validate resolved IPs against CIDR allowlist. // Loopback and link-local are still always blocked. @@ -1597,7 +1607,7 @@ fn query_l7_config( // Only query if action is Allow (not Deny) let has_policy = match &decision.action { NetworkAction::Allow { matched_policy } => matched_policy.is_some(), - _ => false, + NetworkAction::Deny { .. } => false, }; if !has_policy { return None; @@ -1640,7 +1650,7 @@ fn query_tls_mode( ) -> crate::l7::TlsMode { let has_policy = match &decision.action { NetworkAction::Allow { matched_policy } => matched_policy.is_some(), - _ => false, + NetworkAction::Deny { .. } => false, }; if !has_policy { return crate::l7::TlsMode::Auto; @@ -1763,7 +1773,10 @@ async fn resolve_from_sandbox_hosts( if addrs.is_empty() { None } else { Some(addrs) } } +// Mirrors the Linux signature so call sites can `.await` uniformly across +// platforms; the non-Linux path has nothing to await. 
#[cfg(not(target_os = "linux"))] +#[allow(clippy::unused_async)] async fn resolve_from_sandbox_hosts( _host: &str, _port: u16, @@ -1969,7 +1982,7 @@ fn parse_allowed_ips(raw: &[String]) -> std::result::Result, S } nets.push(n); } - Err(_) => errors.push(format!("invalid CIDR/IP in allowed_ips: {entry}")), + Err(()) => errors.push(format!("invalid CIDR/IP in allowed_ips: {entry}")), } } @@ -1980,7 +1993,7 @@ fn parse_allowed_ips(raw: &[String]) -> std::result::Result, S } } -/// Query allowed_ips from the matched endpoint config for a CONNECT decision. +/// Query `allowed_ips` from the matched endpoint config for a CONNECT decision. fn query_allowed_ips( engine: &OpaEngine, decision: &ConnectDecision, @@ -1990,7 +2003,7 @@ fn query_allowed_ips( // Only query if action is Allow with a matched policy let has_policy = match &decision.action { NetworkAction::Allow { matched_policy } => matched_policy.is_some(), - _ => false, + NetworkAction::Deny { .. } => false, }; if !has_policy { return vec![]; @@ -2049,15 +2062,12 @@ fn normalize_inference_path(path: &str) -> String { fn extract_host_from_uri(uri: &str) -> String { // Absolute-form URIs look like "http://host[:port]/path" // Strip the scheme prefix, then extract the authority (host[:port]) before the first '/'. 
- let after_scheme = uri.find("://").map(|i| &uri[i + 3..]).unwrap_or(uri); + let after_scheme = uri.find("://").map_or(uri, |i| &uri[i + 3..]); let authority = after_scheme.split('/').next().unwrap_or(after_scheme); // Strip port if present (handle IPv6 bracket notation) let host = if authority.starts_with('[') { // IPv6: [::1]:port - authority - .find(']') - .map(|i| &authority[..=i]) - .unwrap_or(authority) + authority.find(']').map_or(authority, |i| &authority[..=i]) } else { authority.split(':').next().unwrap_or(authority) }; @@ -2092,14 +2102,12 @@ fn parse_proxy_uri(uri: &str) -> Result<(String, String, u16, String)> { .find(']') .ok_or_else(|| miette::miette!("Unclosed IPv6 bracket in URI: {uri}"))?; let after_bracket = &rest[bracket_end + 1..]; - if let Some(slash_pos) = after_bracket.find('/') { + after_bracket.find('/').map_or((rest, "/"), |slash_pos| { ( - &rest[..bracket_end + 1 + slash_pos], + &rest[..=bracket_end + slash_pos], &after_bracket[slash_pos..], ) - } else { - (&rest[..], "/") - } + }) } else if let Some(slash_pos) = rest.find('/') { (&rest[..slash_pos], &rest[slash_pos..]) } else { @@ -2210,10 +2218,10 @@ fn rewrite_forward_request( continue; } - let rewritten_line = match secret_resolver { - Some(resolver) => rewrite_header_line(line, resolver), - None => line.to_string(), - }; + let rewritten_line = secret_resolver.map_or_else( + || line.to_string(), + |resolver| rewrite_header_line(line, resolver), + ); output.extend_from_slice(rewritten_line.as_bytes()); output.extend_from_slice(b"\r\n"); @@ -2256,6 +2264,9 @@ fn rewrite_forward_request( /// Private IPs require explicit `allowed_ips` on the endpoint config (SSRF /// override). Rewrites the absolute-form request to origin-form, connects /// upstream, and relays the response using `copy_bidirectional` for streaming. +// Many distinct, non-related context parameters are required for forward proxy +// dispatch; bundling them into a struct would just shift the noise into call sites. 
+#[allow(clippy::too_many_arguments)] async fn handle_forward_proxy( method: &str, target_uri: &str, @@ -2466,10 +2477,11 @@ async fn handle_forward_proxy( let query_params = match crate::l7::path::canonicalize_request_target(&path, &canonicalize_options) { Ok((canon, query)) => { - let params = match query.as_deref() { - Some(q) => crate::l7::rest::parse_query_params(q).unwrap_or_default(), - None => std::collections::HashMap::new(), - }; + let params = query + .as_deref() + .map_or_else(std::collections::HashMap::new, |q| { + crate::l7::rest::parse_query_params(q).unwrap_or_default() + }); path = canon.path; params } @@ -2525,13 +2537,8 @@ async fn handle_forward_proxy( { let (action_id, disposition_id, severity) = match decision_str { - "allow" => ( - ActionId::Allowed, - DispositionId::Allowed, - SeverityId::Informational, - ), "deny" => (ActionId::Denied, DispositionId::Blocked, SeverityId::Medium), - "audit" => ( + "allow" | "audit" => ( ActionId::Allowed, DispositionId::Allowed, SeverityId::Informational, @@ -2603,6 +2610,9 @@ async fn handle_forward_proxy( raw_allowed_ips = implicit_allowed_ips_for_ip_host(&host); } + // The "non-empty" branch is the explicit-allowlist path; reading it first + // matches the policy decision narrative. + #[allow(clippy::if_not_else)] let addrs = if !raw_allowed_ips.is_empty() { // allowed_ips mode: validate resolved IPs against CIDR allowlist. @@ -2916,6 +2926,12 @@ fn is_benign_relay_error(err: &miette::Report) -> bool { } #[cfg(test)] +#[allow( + clippy::needless_raw_string_hashes, + clippy::iter_on_single_items, + clippy::needless_continue, + reason = "Test code: test fixtures and explicit control-flow markers are idiomatic in tests." 
+)] mod tests { use super::*; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; diff --git a/crates/openshell-sandbox/src/secrets.rs b/crates/openshell-sandbox/src/secrets.rs index a27537c91..63e253e50 100644 --- a/crates/openshell-sandbox/src/secrets.rs +++ b/crates/openshell-sandbox/src/secrets.rs @@ -8,7 +8,7 @@ use std::fmt; const PLACEHOLDER_PREFIX: &str = "openshell:resolve:env:"; /// Public access to the placeholder prefix for fail-closed scanning in other modules. -pub(crate) const PLACEHOLDER_PREFIX_PUBLIC: &str = PLACEHOLDER_PREFIX; +pub const PLACEHOLDER_PREFIX_PUBLIC: &str = PLACEHOLDER_PREFIX; /// Characters that are valid in an env var key name (used to extract /// placeholder boundaries within concatenated strings like path segments). @@ -23,7 +23,7 @@ fn is_env_key_char(b: u8) -> bool { /// Error returned when a placeholder cannot be resolved or a resolved secret /// contains prohibited characters. #[derive(Debug)] -pub(crate) struct UnresolvedPlaceholderError { +pub struct UnresolvedPlaceholderError { pub location: &'static str, // "header", "query_param", "path" } @@ -39,18 +39,21 @@ impl fmt::Display for UnresolvedPlaceholderError { /// Result of rewriting an HTTP header block with credential resolution. #[derive(Debug)] -pub(crate) struct RewriteResult { +pub struct RewriteResult { /// The rewritten HTTP bytes (headers + body overflow). pub rewritten: Vec, /// A redacted version of the request target for logging. /// Contains `[CREDENTIAL]` in place of resolved credential values. /// `None` if the target was not modified. + // Kept on the public result struct as part of the API contract; consumed + // selectively by callers that emit redacted logs. + #[allow(dead_code)] pub redacted_target: Option, } /// Result of rewriting a request target for OPA evaluation. #[derive(Debug)] -pub(crate) struct RewriteTargetResult { +pub struct RewriteTargetResult { /// The resolved target (real secrets) — for upstream forwarding only. 
pub resolved: String, /// The redacted target (`[CREDENTIAL]` in place of secrets) — for OPA + logs. @@ -119,10 +122,9 @@ impl SecretResolver { .strip_prefix("Basic ") .or_else(|| trimmed.strip_prefix("basic ")) .map(str::trim) + && let Some(rewritten) = self.rewrite_basic_auth_token(encoded) { - if let Some(rewritten) = self.rewrite_basic_auth_token(encoded) { - return Some(format!("Basic {rewritten}")); - } + return Some(format!("Basic {rewritten}")); } // Prefixed placeholder: `Bearer openshell:resolve:env:KEY` @@ -172,7 +174,7 @@ impl SecretResolver { } } -pub(crate) fn placeholder_for_env_key(key: &str) -> String { +pub fn placeholder_for_env_key(key: &str) -> String { format!("{PLACEHOLDER_PREFIX}{key}") } @@ -226,12 +228,26 @@ fn percent_encode_path_segment(input: &str) -> String { let mut encoded = String::with_capacity(input.len()); for byte in input.bytes() { match byte { - // unreserved - b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' | b'_' | b'~' => { - encoded.push(byte as char); - } - // sub-delims + ":" + "@" - b'!' | b'$' | b'&' | b'\'' | b'(' | b')' | b'*' | b'+' | b',' | b';' | b'=' | b':' + // unreserved + sub-delims + ":" + "@" + b'A'..=b'Z' + | b'a'..=b'z' + | b'0'..=b'9' + | b'-' + | b'.' + | b'_' + | b'~' + | b'!' 
+ | b'$' + | b'&' + | b'\'' + | b'(' + | b')' + | b'*' + | b'+' + | b',' + | b';' + | b'=' + | b':' | b'@' => { encoded.push(byte as char); } @@ -254,11 +270,11 @@ fn percent_decode(input: &str) -> String { let lo = bytes.next(); if let (Some(h), Some(l)) = (hi, lo) { let hex = [h, l]; - if let Ok(s) = std::str::from_utf8(&hex) { - if let Ok(val) = u8::from_str_radix(s, 16) { - decoded.push(val); - continue; - } + if let Ok(s) = std::str::from_utf8(&hex) + && let Ok(val) = u8::from_str_radix(s, 16) + { + decoded.push(val); + continue; } // Invalid percent encoding — preserve verbatim decoded.push(b'%'); @@ -318,46 +334,34 @@ struct RewriteLineResult { /// /// Given a request line like `GET /bot{TOKEN}/path?key={APIKEY} HTTP/1.1`, /// resolves placeholders in both path segments and query parameter values. +// `resolver` (the credential resolver) and `resolved` (the resolved string +// output) are intentionally distinct nouns; renaming would obscure intent. +#[allow(clippy::similar_names)] fn rewrite_request_line( line: &str, resolver: &SecretResolver, ) -> Result { // Request line format: METHOD SP REQUEST-URI SP HTTP-VERSION let mut parts = line.splitn(3, ' '); - let method = match parts.next() { - Some(m) => m, - None => { - return Ok(RewriteLineResult { - line: line.to_string(), - redacted_target: None, - }); - } + let unchanged = || { + Ok(RewriteLineResult { + line: line.to_string(), + redacted_target: None, + }) }; - let uri = match parts.next() { - Some(u) => u, - None => { - return Ok(RewriteLineResult { - line: line.to_string(), - redacted_target: None, - }); - } + let Some(method) = parts.next() else { + return unchanged(); }; - let version = match parts.next() { - Some(v) => v, - None => { - return Ok(RewriteLineResult { - line: line.to_string(), - redacted_target: None, - }); - } + let Some(uri) = parts.next() else { + return unchanged(); + }; + let Some(version) = parts.next() else { + return unchanged(); }; // Only rewrite if the URI contains a 
placeholder if !uri.contains(PLACEHOLDER_PREFIX) { - return Ok(RewriteLineResult { - line: line.to_string(), - redacted_target: None, - }); + return unchanged(); } // Split URI into path and query @@ -382,9 +386,10 @@ fn rewrite_request_line( }; // Reassemble - let resolved_uri = match &resolved_query { - Some(q) => format!("{resolved_path}?{q}"), - None => resolved_path.clone(), + let resolved_uri = if let Some(q) = resolved_query.as_ref() { + format!("{resolved_path}?{q}") + } else { + resolved_path }; let redacted_uri = match &redacted_query { Some(q) => format!("{redacted_path}?{q}"), @@ -404,6 +409,9 @@ fn rewrite_request_line( /// /// Returns `Some((resolved_path, redacted_path))` if any placeholders were found, /// `None` if no placeholders exist in the path. +// `resolver` and `resolved` are intentionally distinct nouns; see comment at +// `rewrite_request_line`. +#[allow(clippy::similar_names)] fn rewrite_uri_path( path: &str, resolver: &SecretResolver, @@ -446,6 +454,9 @@ fn rewrite_uri_path( /// /// Uses the placeholder grammar `openshell:resolve:env:[A-Za-z_][A-Za-z0-9_]*` /// to determine placeholder boundaries within concatenated text. +// `resolver` and `resolved` are intentionally distinct nouns; see comment at +// `rewrite_request_line`. +#[allow(clippy::similar_names)] fn rewrite_path_segment( segment: &str, resolver: &SecretResolver, @@ -561,7 +572,7 @@ fn rewrite_uri_query_params( /// /// Returns `Err` if any placeholder is detected but cannot be resolved /// (fail-closed behavior). 
-pub(crate) fn rewrite_http_header_block( +pub fn rewrite_http_header_block( raw: &[u8], resolver: Option<&SecretResolver>, ) -> Result { @@ -627,22 +638,25 @@ pub(crate) fn rewrite_http_header_block( }) } -pub(crate) fn rewrite_header_line(line: &str, resolver: &SecretResolver) -> String { +pub fn rewrite_header_line(line: &str, resolver: &SecretResolver) -> String { let Some((name, value)) = line.split_once(':') else { return line.to_string(); }; - match resolver.rewrite_header_value(value.trim()) { - Some(rewritten) => format!("{name}: {rewritten}"), - None => line.to_string(), - } + resolver.rewrite_header_value(value.trim()).map_or_else( + || line.to_string(), + |rewritten| format!("{name}: {rewritten}"), + ) } /// Resolve placeholders in a request target (path + query) for OPA evaluation. /// /// Returns the resolved target (real secrets, for upstream) and a redacted /// version (`[CREDENTIAL]` in place of secrets, for OPA input and logs). -pub(crate) fn rewrite_target_for_eval( +// `resolver` and `resolved` are intentionally distinct nouns; see comment at +// `rewrite_request_line`. +#[allow(clippy::similar_names)] +pub fn rewrite_target_for_eval( target: &str, resolver: &SecretResolver, ) -> Result { @@ -695,6 +709,10 @@ pub(crate) fn rewrite_target_for_eval( // --------------------------------------------------------------------------- #[cfg(test)] +#[allow( + clippy::iter_on_single_items, + reason = "Test code: single-key fixtures are clearer as array literals than std::iter::once." +)] mod tests { use super::*; diff --git a/crates/openshell-sandbox/src/ssh.rs b/crates/openshell-sandbox/src/ssh.rs index 4b2cd1572..34607a04e 100644 --- a/crates/openshell-sandbox/src/ssh.rs +++ b/crates/openshell-sandbox/src/ssh.rs @@ -32,14 +32,16 @@ use tracing::warn; /// Perform SSH server initialization: generate a host key, build the config, /// and bind the Unix socket listener. 
Extracted so that startup errors can be /// forwarded through the readiness channel rather than being silently logged. -async fn ssh_server_init( - listen_path: &Path, - ca_file_paths: &Option<(PathBuf, PathBuf)>, -) -> Result<( +type SshServerInit = ( UnixListener, Arc, Option>, -)> { +); + +fn ssh_server_init( + listen_path: &Path, + ca_file_paths: &Option<(PathBuf, PathBuf)>, +) -> Result { let mut rng = OsRng; let host_key = PrivateKey::random(&mut rng, Algorithm::Ed25519).into_diagnostic()?; @@ -105,7 +107,7 @@ pub async fn run_ssh_server( ca_file_paths: Option<(PathBuf, PathBuf)>, provider_env: HashMap, ) -> Result<()> { - let (listener, config, ca_paths) = match ssh_server_init(&listen_path, &ca_file_paths).await { + let (listener, config, ca_paths) = match ssh_server_init(&listen_path, &ca_file_paths) { Ok(v) => { // Signal that the SSH server has bound the socket and is ready to // accept connections. The parent task awaits this before spawning @@ -265,8 +267,8 @@ impl russh::server::Handler for SshHandler { /// Clean up per-channel state when the channel is closed. /// /// This is the final cleanup and subsumes `channel_eof` — if `channel_close` - /// fires without a preceding `channel_eof`, all resources (pty_master File, - /// input_sender) are dropped here. + /// fires without a preceding `channel_eof`, all resources (`pty_master` File, + /// `input_sender`) are dropped here. async fn channel_close( &mut self, channel: ChannelId, @@ -317,7 +319,9 @@ impl russh::server::Handler for SshHandler { } let host = host_to_connect.to_string(); - let port = port_to_connect as u16; + // SSH protocol port is bounded by u32 but only u16 is meaningful; + // saturate as a guard for malformed clients. 
+ let port = u16::try_from(port_to_connect).unwrap_or(u16::MAX); let netns_fd = self.netns_fd; tokio::spawn(async move { @@ -652,8 +656,10 @@ fn session_user_and_home(policy: &SandboxPolicy) -> (String, String) { let home = nix::unistd::User::from_name(user) .ok() .flatten() - .map(|u| u.dir.to_string_lossy().into_owned()) - .unwrap_or_else(|| format!("/home/{user}")); + .map_or_else( + || format!("/home/{user}"), + |u| u.dir.to_string_lossy().into_owned(), + ); (user.to_string(), home) } _ => ("sandbox".to_string(), "/sandbox".to_string()), @@ -1182,6 +1188,11 @@ fn is_loopback_host(host: &str) -> bool { } #[cfg(test)] +#[allow( + clippy::doc_markdown, + unsafe_code, + reason = "Test code: doc text references identifiers and uses libc::winsize zero-init." +)] mod tests { use super::*; use std::process::Stdio; diff --git a/crates/openshell-sandbox/tests/websocket_upgrade.rs b/crates/openshell-sandbox/tests/websocket_upgrade.rs index 81eb46e1f..e4cd232ce 100644 --- a/crates/openshell-sandbox/tests/websocket_upgrade.rs +++ b/crates/openshell-sandbox/tests/websocket_upgrade.rs @@ -1,6 +1,13 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 +#![allow( + clippy::match_same_arms, + clippy::cast_possible_truncation, + clippy::doc_markdown, + reason = "Integration test: dual-arm patterns and small-payload casts are idiomatic in test setup." +)] + //! Integration test: WebSocket upgrade through the L7 relay. //! //! 
Spins up a dummy WebSocket echo server, connects a client through the diff --git a/crates/openshell-server/src/auth.rs b/crates/openshell-server/src/auth.rs index b896d062c..e6ff2f0d0 100644 --- a/crates/openshell-server/src/auth.rs +++ b/crates/openshell-server/src/auth.rs @@ -96,32 +96,29 @@ async fn auth_connect( let safe_gateway = html_escape(&gateway_display); - match cf_token { - Some(token) => { - let nonce = uuid::Uuid::new_v4().to_string(); - let csp = format!( - "default-src 'none'; script-src 'nonce-{nonce}'; style-src 'unsafe-inline'; connect-src http://127.0.0.1:*" - ); - ( - [(header::CONTENT_SECURITY_POLICY, csp)], - Html(render_connect_page( - &safe_gateway, - params.callback_port, - &token, - ¶ms.code, - &nonce, - )), - ) - .into_response() - } - None => { - let csp = "default-src 'none'; style-src 'unsafe-inline'".to_string(); - ( - [(header::CONTENT_SECURITY_POLICY, csp)], - Html(render_waiting_page(params.callback_port, ¶ms.code)), - ) - .into_response() - } + if let Some(token) = cf_token { + let nonce = uuid::Uuid::new_v4().to_string(); + let csp = format!( + "default-src 'none'; script-src 'nonce-{nonce}'; style-src 'unsafe-inline'; connect-src http://127.0.0.1:*" + ); + ( + [(header::CONTENT_SECURITY_POLICY, csp)], + Html(render_connect_page( + &safe_gateway, + params.callback_port, + &token, + ¶ms.code, + &nonce, + )), + ) + .into_response() + } else { + let csp = "default-src 'none'; style-src 'unsafe-inline'".to_string(); + ( + [(header::CONTENT_SECURITY_POLICY, csp)], + Html(render_waiting_page(params.callback_port, ¶ms.code)), + ) + .into_response() } } diff --git a/crates/openshell-server/src/cli.rs b/crates/openshell-server/src/cli.rs index 2df0b06f4..d9d81eed7 100644 --- a/crates/openshell-server/src/cli.rs +++ b/crates/openshell-server/src/cli.rs @@ -79,7 +79,7 @@ struct Args { #[arg(long, env = "OPENSHELL_SANDBOX_IMAGE")] sandbox_image: Option, - /// Kubernetes imagePullPolicy for sandbox pods (Always, IfNotPresent, Never). 
+ /// Kubernetes `imagePullPolicy` for sandbox pods (Always, `IfNotPresent`, Never). #[arg(long, env = "OPENSHELL_SANDBOX_IMAGE_PULL_POLICY")] sandbox_image_pull_policy: Option, diff --git a/crates/openshell-server/src/compute/mod.rs b/crates/openshell-server/src/compute/mod.rs index a218d18a8..8b614efbb 100644 --- a/crates/openshell-server/src/compute/mod.rs +++ b/crates/openshell-server/src/compute/mod.rs @@ -81,7 +81,7 @@ trait StartupResume: Send + Sync { #[tonic::async_trait] impl StartupResume for DockerComputeDriver { async fn resume_sandbox(&self, sandbox_id: &str, sandbox_name: &str) -> Result { - DockerComputeDriver::resume_sandbox(self, sandbox_id, sandbox_name) + Self::resume_sandbox(self, sandbox_id, sandbox_name) .await .map_err(|err| err.to_string()) } @@ -97,7 +97,7 @@ const ORPHAN_GRACE_PERIOD: Duration = Duration::from_secs(300); pub use openshell_core::ComputeDriverError as ComputeError; #[derive(Debug)] -pub(crate) struct ManagedDriverProcess { +pub struct ManagedDriverProcess { child: std::sync::Mutex>, socket_path: std::path::PathBuf, } @@ -210,9 +210,7 @@ impl ComputeDriver for RemoteComputeDriver { ) -> Result, Status> { let mut client = self.client(); let response = client.watch_sandboxes(request).await?; - let stream = response - .into_inner() - .map(|item| item.map_err(|status| status)); + let stream = response.into_inner(); Ok(tonic::Response::new(Box::pin(stream))) } } @@ -1132,8 +1130,6 @@ fn driver_sandbox_template_from_public(template: &SandboxTemplate) -> DriverSand fn extract_typed_resources( resources: &Option, ) -> Option { - let s = resources.as_ref()?; - fn get_quantity(s: &prost_types::Struct, section: &str, key: &str) -> String { s.fields .get(section) @@ -1148,6 +1144,8 @@ fn extract_typed_resources( .unwrap_or_default() } + let s = resources.as_ref()?; + let req = DriverResourceRequirements { cpu_request: get_quantity(s, "requests", "cpu"), cpu_limit: get_quantity(s, "limits", "cpu"), @@ -1169,7 +1167,7 @@ fn 
extract_typed_resources( } /// Build the opaque `platform_config` Struct from platform-specific public -/// template fields (runtime_class_name, annotations, volume_claim_templates) +/// template fields (`runtime_class_name`, annotations, `volume_claim_templates`) /// plus any resource fields beyond CPU/memory. fn build_platform_config(template: &SandboxTemplate) -> Option { use prost_types::{Struct, Value, value::Kind}; @@ -1617,6 +1615,7 @@ mod tests { CreateSandboxResponse, DeleteSandboxResponse, GetCapabilitiesResponse, GetSandboxRequest, GetSandboxResponse, StopSandboxRequest, StopSandboxResponse, ValidateSandboxCreateResponse, }; + use std::collections::HashMap; use std::sync::Arc; use tokio::sync::{mpsc, oneshot}; @@ -1790,8 +1789,8 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: id.to_string(), name: name.to_string(), - created_at_ms: 1000000, - labels: std::collections::HashMap::new(), + created_at_ms: 1_000_000, + labels: HashMap::new(), }), phase: phase as i32, ..Default::default() @@ -2333,7 +2332,6 @@ mod tests { deleting: false, }), }], - ..Default::default() })) .await; @@ -2391,7 +2389,6 @@ mod tests { last_transition_time: String::new(), })), }], - ..Default::default() })) .await; @@ -2524,8 +2521,8 @@ mod tests { #[derive(Default)] struct RecordingResume { - calls: tokio::sync::Mutex>, - results: tokio::sync::Mutex>>, + calls: Mutex>, + results: Mutex>>, } impl RecordingResume { diff --git a/crates/openshell-server/src/compute/vm.rs b/crates/openshell-server/src/compute/vm.rs index 622bc393e..844b14240 100644 --- a/crates/openshell-server/src/compute/vm.rs +++ b/crates/openshell-server/src/compute/vm.rs @@ -136,10 +136,10 @@ impl Default for VmComputeConfig { #[cfg(unix)] #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct VmGuestTlsPaths { - pub(crate) ca: PathBuf, - pub(crate) cert: PathBuf, - pub(crate) key: PathBuf, +pub struct VmGuestTlsPaths { + pub ca: PathBuf, + pub cert: PathBuf, + pub key: 
PathBuf, } /// Resolve the `openshell-driver-vm` binary path. @@ -152,7 +152,7 @@ pub(crate) struct VmGuestTlsPaths { /// `/usr/local/libexec`. /// 3. Sibling of the gateway's own executable (last-resort fallback so /// local development builds still work out of the box). -pub(crate) fn resolve_compute_driver_bin(vm_config: &VmComputeConfig) -> Result { +pub fn resolve_compute_driver_bin(vm_config: &VmComputeConfig) -> Result { let mut searched: Vec = Vec::new(); // 1. Configured driver directory, or the conventional install locations @@ -191,11 +191,10 @@ pub(crate) fn resolve_compute_driver_bin(vm_config: &VmComputeConfig) -> Result< } fn resolve_driver_search_dirs(vm_config: &VmComputeConfig) -> Vec { - if let Some(dir) = vm_config.driver_dir.clone() { - vec![dir] - } else { - VmComputeConfig::default_driver_search_dirs(std::env::var_os("HOME").map(PathBuf::from)) - } + vm_config.driver_dir.clone().map_or_else( + || VmComputeConfig::default_driver_search_dirs(std::env::var_os("HOME").map(PathBuf::from)), + |dir| vec![dir], + ) } fn push_unique_path(paths: &mut Vec, path: PathBuf) { @@ -205,12 +204,12 @@ fn push_unique_path(paths: &mut Vec, path: PathBuf) { } /// Path of the Unix domain socket the driver will listen on. -pub(crate) fn compute_driver_socket_path(vm_config: &VmComputeConfig) -> PathBuf { +pub fn compute_driver_socket_path(vm_config: &VmComputeConfig) -> PathBuf { vm_config.state_dir.join("compute-driver.sock") } #[cfg(unix)] -pub(crate) fn compute_driver_guest_tls_paths( +pub fn compute_driver_guest_tls_paths( config: &Config, vm_config: &VmComputeConfig, ) -> Result> { @@ -261,7 +260,7 @@ pub(crate) fn compute_driver_guest_tls_paths( /// and return a gRPC `Channel` connected to it plus a process handle that /// kills the subprocess and removes the socket on drop. 
#[cfg(unix)] -pub(crate) async fn spawn( +pub async fn spawn( config: &Config, vm_config: &VmComputeConfig, ) -> Result<(Channel, Arc)> { @@ -333,7 +332,7 @@ pub(crate) async fn spawn( } #[cfg(not(unix))] -pub(crate) async fn spawn( +pub async fn spawn( _config: &Config, _vm_config: &VmComputeConfig, ) -> Result<(Channel, std::sync::Arc)> { @@ -349,9 +348,10 @@ async fn wait_for_compute_driver( ) -> Result { let mut last_error: Option = None; for _ in 0..100 { - if let Some(status) = child.try_wait().map_err(|e| { + let try_wait_result = child.try_wait().map_err(|e| { Error::execution(format!("failed to poll vm compute driver process: {e}")) - })? { + })?; + if let Some(status) = try_wait_result { return Err(Error::execution(format!( "vm compute driver exited before becoming ready with status {status}" ))); diff --git a/crates/openshell-server/src/grpc/mod.rs b/crates/openshell-server/src/grpc/mod.rs index 969204dad..89e639ac9 100644 --- a/crates/openshell-server/src/grpc/mod.rs +++ b/crates/openshell-server/src/grpc/mod.rs @@ -3,7 +3,7 @@ //! gRPC service implementation. -pub(crate) mod policy; +pub mod policy; mod provider; mod sandbox; mod validation; @@ -119,7 +119,9 @@ fn current_time_ms() -> Result { /// /// This is a crate-level helper that wraps the validation module's implementation. /// Use this from modules outside of `grpc` that need to validate metadata. -pub(crate) fn validate_object_metadata( +// `tonic::Status` is large but is the API surface of gRPC handlers. 
+#[allow(clippy::result_large_err)] +pub fn validate_object_metadata( metadata: Option<&openshell_core::proto::datamodel::v1::ObjectMeta>, resource_type: &str, ) -> Result<(), Status> { diff --git a/crates/openshell-server/src/grpc/policy.rs b/crates/openshell-server/src/grpc/policy.rs index 8a867d2da..25a4bd17c 100644 --- a/crates/openshell-server/src/grpc/policy.rs +++ b/crates/openshell-server/src/grpc/policy.rs @@ -65,7 +65,7 @@ use super::{MAX_PAGE_SIZE, StoredSettingValue, StoredSettings, clamp_limit, curr const GLOBAL_SETTINGS_OBJECT_TYPE: &str = "gateway_settings"; const GLOBAL_SETTINGS_NAME: &str = "global"; /// Internal object type for durable sandbox-scoped settings. -pub(crate) const SANDBOX_SETTINGS_OBJECT_TYPE: &str = "sandbox_settings"; +pub const SANDBOX_SETTINGS_OBJECT_TYPE: &str = "sandbox_settings"; /// Reserved settings key used to store global policy payload. const POLICY_SETTING_KEY: &str = "policy"; /// Sentinel `sandbox_id` used to store global policy revisions. @@ -135,10 +135,10 @@ fn summarize_cli_policy_merge_op(operation: &PolicyMergeOp) -> String { rule_name, host, port, - } => match rule_name { - Some(rule_name) => format!("remove-endpoint {host}:{port} from rule {rule_name}"), - None => format!("remove-endpoint {host}:{port}"), - }, + } => rule_name.as_ref().map_or_else( + || format!("remove-endpoint {host}:{port}"), + |rule_name| format!("remove-endpoint {host}:{port} from rule {rule_name}"), + ), PolicyMergeOp::RemoveRule { rule_name } => format!("remove-rule {rule_name}"), PolicyMergeOp::AddDenyRules { host, @@ -2001,15 +2001,15 @@ fn validate_rule_not_always_blocked(rule: &NetworkPolicyRule) -> Result<(), Stat for ep in &rule.endpoints { // Check if the endpoint host is a literal always-blocked IP. 
- if let Ok(ip) = ep.host.parse::() { - if is_always_blocked_ip(ip) { - return Err(Status::invalid_argument(format!( - "proposed rule endpoint host '{}' is an always-blocked address \ - (loopback/link-local/unspecified); the proxy will deny traffic \ - to this destination regardless of policy", - ep.host - ))); - } + if let Ok(ip) = ep.host.parse::() + && is_always_blocked_ip(ip) + { + return Err(Status::invalid_argument(format!( + "proposed rule endpoint host '{}' is an always-blocked address \ + (loopback/link-local/unspecified); the proxy will deny traffic \ + to this destination regardless of policy", + ep.host + ))); } let host_lc = ep.host.to_lowercase(); if host_lc == "localhost" || host_lc == "localhost." { @@ -2028,14 +2028,14 @@ fn validate_rule_not_always_blocked(rule: &NetworkPolicyRule) -> Result<(), Stat IpAddr::V6(v6) => ipnet::IpNet::V6(ipnet::Ipv6Net::from(v6)), }) }); - if let Ok(net) = parsed { - if is_always_blocked_net(net) { - return Err(Status::invalid_argument(format!( - "proposed rule contains always-blocked allowed_ips entry '{entry}'; \ - SSRF hardening prevents traffic to these destinations \ - regardless of policy" - ))); - } + if let Ok(net) = parsed + && is_always_blocked_net(net) + { + return Err(Status::invalid_argument(format!( + "proposed rule contains always-blocked `allowed_ips` entry '{entry}'; \ + SSRF hardening prevents traffic to these destinations \ + regardless of policy" + ))); } // Invalid entries are not our concern here — the sandbox's // parse_allowed_ips handles syntax validation. 
@@ -2603,7 +2603,7 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: "sb-no-policy".to_string(), name: "no-policy-sandbox".to_string(), - created_at_ms: 1000000, + created_at_ms: 1_000_000, labels: std::collections::HashMap::new(), }), spec: Some(SandboxSpec { @@ -2633,7 +2633,7 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: "sb-backfill".to_string(), name: "backfill-sandbox".to_string(), - created_at_ms: 1000000, + created_at_ms: 1_000_000, labels: std::collections::HashMap::new(), }), spec: Some(SandboxSpec { @@ -3184,7 +3184,7 @@ mod tests { let sandbox_id = "sb-merge"; let initial_policy = SandboxPolicy { - network_policies: [( + network_policies: std::iter::once(( "test_server".to_string(), NetworkPolicyRule { name: "test_server".to_string(), @@ -3198,8 +3198,7 @@ mod tests { ..Default::default() }], }, - )] - .into_iter() + )) .collect(), ..Default::default() }; @@ -3284,7 +3283,7 @@ mod tests { let sandbox_id = "sb-new"; let initial_policy = SandboxPolicy { - network_policies: [( + network_policies: std::iter::once(( "existing_rule".to_string(), NetworkPolicyRule { name: "existing_rule".to_string(), @@ -3298,8 +3297,7 @@ mod tests { ..Default::default() }], }, - )] - .into_iter() + )) .collect(), ..Default::default() }; @@ -3370,7 +3368,7 @@ mod tests { let sandbox_id = "sb-concurrent-merge"; let initial_policy = SandboxPolicy { - network_policies: [( + network_policies: std::iter::once(( "github".to_string(), NetworkPolicyRule { name: "github".to_string(), @@ -3384,8 +3382,7 @@ mod tests { }], ..Default::default() }, - )] - .into_iter() + )) .collect(), ..Default::default() }; @@ -3601,8 +3598,7 @@ mod tests { let encoded = hex::encode(policy.encode_to_vec()); let global = StoredSettings { revision: 1, - settings: [("policy".to_string(), StoredSettingValue::Bytes(encoded))] - .into_iter() + settings: std::iter::once(("policy".to_string(), StoredSettingValue::Bytes(encoded))) 
.collect(), }; @@ -3751,20 +3747,18 @@ mod tests { fn merge_effective_settings_policy_key_is_excluded() { let global = StoredSettings { revision: 1, - settings: [( + settings: std::iter::once(( "policy".to_string(), StoredSettingValue::Bytes("deadbeef".to_string()), - )] - .into_iter() + )) .collect(), }; let sandbox = StoredSettings { revision: 1, - settings: [( + settings: std::iter::once(( "policy".to_string(), StoredSettingValue::Bytes("cafebabe".to_string()), - )] - .into_iter() + )) .collect(), }; diff --git a/crates/openshell-server/src/grpc/provider.rs b/crates/openshell-server/src/grpc/provider.rs index 2e3876fb8..a9b18b7eb 100644 --- a/crates/openshell-server/src/grpc/provider.rs +++ b/crates/openshell-server/src/grpc/provider.rs @@ -426,8 +426,8 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: String::new(), name: "gitlab-local".to_string(), - created_at_ms: 1000000, - labels: std::collections::HashMap::new(), + created_at_ms: 1_000_000, + labels: HashMap::new(), }), r#type: "gitlab".to_string(), credentials: std::iter::once(( @@ -499,7 +499,7 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: String::new(), name: "bad-provider".to_string(), - created_at_ms: 1000000, + created_at_ms: 1_000_000, labels: HashMap::new(), }), r#type: String::new(), @@ -523,7 +523,7 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: String::new(), name: "missing".to_string(), - created_at_ms: 1000000, + created_at_ms: 1_000_000, labels: HashMap::new(), }), r#type: String::new(), @@ -551,7 +551,7 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: String::new(), name: "noop-test".to_string(), - created_at_ms: 1000000, + created_at_ms: 1_000_000, labels: HashMap::new(), }), r#type: String::new(), @@ -614,13 +614,13 @@ mod tests { updated.credentials.get("API_TOKEN"), Some(&"REDACTED".to_string()) ); - 
assert!(updated.credentials.get("SECONDARY").is_none()); + assert!(!updated.credentials.contains_key("SECONDARY")); assert_eq!(updated.config.len(), 1); assert_eq!( updated.config.get("endpoint"), Some(&"https://example.com".to_string()) ); - assert!(updated.config.get("region").is_none()); + assert!(!updated.config.contains_key("region")); let stored: Provider = store .get_message_by_name("delete-key-test") .await @@ -631,7 +631,7 @@ mod tests { stored.credentials.get("API_TOKEN"), Some(&"token-123".to_string()) ); - assert!(stored.credentials.get("SECONDARY").is_none()); + assert!(!stored.credentials.contains_key("SECONDARY")); } #[tokio::test] @@ -916,7 +916,7 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: String::new(), name: "my-claude".to_string(), - created_at_ms: 1000000, + created_at_ms: 1_000_000, labels: HashMap::new(), }), r#type: "claude".to_string(), @@ -935,8 +935,8 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: "sandbox-001".to_string(), name: "test-sandbox".to_string(), - created_at_ms: 1000000, - labels: std::collections::HashMap::new(), + created_at_ms: 1_000_000, + labels: HashMap::new(), }), spec: Some(SandboxSpec { providers: vec!["my-claude".to_string()], @@ -971,8 +971,8 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: "sandbox-002".to_string(), name: "empty-sandbox".to_string(), - created_at_ms: 1000000, - labels: std::collections::HashMap::new(), + created_at_ms: 1_000_000, + labels: HashMap::new(), }), spec: Some(SandboxSpec::default()), status: None, diff --git a/crates/openshell-server/src/grpc/sandbox.rs b/crates/openshell-server/src/grpc/sandbox.rs index 60bca2a65..2b6de3b59 100644 --- a/crates/openshell-server/src/grpc/sandbox.rs +++ b/crates/openshell-server/src/grpc/sandbox.rs @@ -46,6 +46,8 @@ pub(super) async fn handle_create_sandbox( state: &Arc, request: Request, ) -> Result, Status> { + use 
crate::persistence::current_time_ms; + let request = request.into_inner(); let spec = request .spec @@ -91,7 +93,6 @@ pub(super) async fn handle_create_sandbox( request.name.clone() }; - use crate::persistence::current_time_ms; let now_ms = current_time_ms() .map_err(|e| Status::internal(format!("failed to get current time: {e}")))?; @@ -160,8 +161,14 @@ pub(super) async fn handle_list_sandboxes( let request = request.into_inner(); let limit = clamp_limit(request.limit, 100, MAX_PAGE_SIZE); - // If label selector is provided, validate and use filtered list - let records = if !request.label_selector.is_empty() { + // If no label selector is provided, use the unfiltered list path + let records = if request.label_selector.is_empty() { + state + .store + .list(Sandbox::object_type(), limit, request.offset) + .await + .map_err(|e| Status::internal(format!("list sandboxes failed: {e}")))? + } else { crate::grpc::validation::validate_label_selector(&request.label_selector)?; state .store @@ -173,12 +180,6 @@ pub(super) async fn handle_list_sandboxes( ) .await .map_err(|e| Status::internal(format!("list sandboxes with selector failed: {e}")))? - } else { - state - .store - .list(Sandbox::object_type(), limit, request.offset) - .await - .map_err(|e| Status::internal(format!("list sandboxes failed: {e}")))? 
}; let mut sandboxes = Vec::with_capacity(records.len()); @@ -441,6 +442,8 @@ pub(super) async fn handle_exec_sandbox( state: &Arc, request: Request, ) -> Result>>, Status> { + use openshell_core::ObjectId; + let req = request.into_inner(); if req.sandbox_id.is_empty() { return Err(Status::invalid_argument("sandbox_id is required")); @@ -482,7 +485,6 @@ pub(super) async fn handle_exec_sandbox( let timeout_seconds = req.timeout_seconds; let request_tty = req.tty; - use openshell_core::ObjectId; let sandbox_id = sandbox.object_id().to_string(); let (tx, rx) = mpsc::channel::>(256); @@ -781,7 +783,7 @@ async fn stream_exec_over_relay( Ok(()) } -/// Create a localhost SSH proxy that bridges to a relay DuplexStream. +/// Create a localhost SSH proxy that bridges to a relay `DuplexStream`. /// /// The proxy forwards raw SSH bytes between the `russh` client and the relay. /// The supervisor bridges the relay to its Unix-socket SSH daemon; filesystem @@ -999,9 +1001,7 @@ mod tests { "-c".to_string(), "print('ok')".to_string(), ], - environment: [("HOME".to_string(), "/home/user".to_string())] - .into_iter() - .collect(), + environment: std::iter::once(("HOME".to_string(), "/home/user".to_string())).collect(), workdir: "/workspace".to_string(), ..Default::default() }; diff --git a/crates/openshell-server/src/grpc/validation.rs b/crates/openshell-server/src/grpc/validation.rs index 5e4a48cb5..160b7e031 100644 --- a/crates/openshell-server/src/grpc/validation.rs +++ b/crates/openshell-server/src/grpc/validation.rs @@ -241,11 +241,7 @@ pub(super) fn validate_string_map( /// Validate field sizes on a `Provider` before persisting. 
pub(super) fn validate_provider_fields(provider: &Provider) -> Result<(), Status> { - let name_len = provider - .metadata - .as_ref() - .map(|m| m.name.len()) - .unwrap_or(0); + let name_len = provider.metadata.as_ref().map_or(0, |m| m.name.len()); if name_len > MAX_NAME_LEN { return Err(Status::invalid_argument(format!( "provider.name exceeds maximum length ({name_len} > {MAX_NAME_LEN})" @@ -291,7 +287,7 @@ pub(super) fn validate_provider_fields(provider: &Provider) -> Result<(), Status /// /// Examples: `app`, `kubernetes.io/app`, `example.com/my-label` /// -/// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +/// See: pub(super) fn validate_label_key(key: &str) -> Result<(), Status> { if key.is_empty() { return Err(Status::invalid_argument("label key cannot be empty")); @@ -401,7 +397,7 @@ pub(super) fn validate_label_key(key: &str) -> Result<(), Status> { /// - If non-empty, must contain only alphanumeric, hyphens, underscores, and dots /// - If non-empty, must start and end with alphanumeric character /// -/// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +/// See: pub(super) fn validate_label_value(value: &str) -> Result<(), Status> { // Empty values are allowed in Kubernetes if value.is_empty() { @@ -631,8 +627,7 @@ pub(super) fn level_matches(log_level: &str, min_level: &str) -> bool { let to_num = |s: &str| match s.to_uppercase().as_str() { "ERROR" => 0, "WARN" => 1, - "INFO" => 2, - "OCSF" => 2, + "INFO" | "OCSF" => 2, "DEBUG" => 3, "TRACE" => 4, _ => 5, // unknown levels always pass @@ -877,7 +872,7 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: String::new(), name: name.to_string(), - created_at_ms: 1000000, + created_at_ms: 1_000_000, labels: HashMap::new(), }), r#type: provider_type.to_string(), diff --git a/crates/openshell-server/src/inference.rs b/crates/openshell-server/src/inference.rs index be983667b..b52700f0d 100644 --- 
a/crates/openshell-server/src/inference.rs +++ b/crates/openshell-server/src/inference.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::result_large_err)] // gRPC handlers return Result, Status> + use openshell_core::proto::{ ClusterInferenceConfig, GetClusterInferenceRequest, GetClusterInferenceResponse, GetInferenceBundleRequest, GetInferenceBundleResponse, InferenceRoute, Provider, ResolvedRoute, @@ -390,10 +392,13 @@ async fn resolve_inference_bundle(store: &Store) -> Result { + () = &mut shutdown => { info!("Shutdown signal received; stopping gateway"); break; } @@ -323,8 +322,8 @@ async fn shutdown_signal() { #[cfg(unix)] { tokio::select! { - _ = ctrl_c_signal() => {} - _ = terminate_signal() => {} + () = ctrl_c_signal() => {} + () = terminate_signal() => {} } } @@ -352,6 +351,9 @@ async fn terminate_signal() { let _ = signal.recv().await; } +// Internal wiring helper: each argument is a distinct piece of runtime state +// that must be passed through, so the count is justified. 
+#[allow(clippy::too_many_arguments)] async fn build_compute_runtime( config: &Config, vm_config: &VmComputeConfig, @@ -421,8 +423,10 @@ async fn build_compute_runtime( let socket_path = std::env::var("OPENSHELL_PODMAN_SOCKET") .ok() .filter(|s| !s.is_empty()) - .map(std::path::PathBuf::from) - .unwrap_or_else(openshell_driver_podman::PodmanComputeConfig::default_socket_path); + .map_or_else( + openshell_driver_podman::PodmanComputeConfig::default_socket_path, + std::path::PathBuf::from, + ); let network_name = std::env::var("OPENSHELL_NETWORK_NAME") .ok() @@ -472,10 +476,12 @@ fn configured_compute_driver(config: &Config) -> Result { [] => Err(Error::config( "at least one compute driver must be configured", )), - [driver @ ComputeDriverKind::Kubernetes] - | [driver @ ComputeDriverKind::Vm] - | [driver @ ComputeDriverKind::Docker] - | [driver @ ComputeDriverKind::Podman] => Ok(*driver), + [ + driver @ (ComputeDriverKind::Kubernetes + | ComputeDriverKind::Vm + | ComputeDriverKind::Docker + | ComputeDriverKind::Podman), + ] => Ok(*driver), drivers => Err(Error::config(format!( "multiple compute drivers are not supported yet; configured drivers: {}", drivers diff --git a/crates/openshell-server/src/persistence/mod.rs b/crates/openshell-server/src/persistence/mod.rs index 80810c173..a62cf58f7 100644 --- a/crates/openshell-server/src/persistence/mod.rs +++ b/crates/openshell-server/src/persistence/mod.rs @@ -13,6 +13,7 @@ pub use openshell_core::proto::{ use openshell_core::{Error as CoreError, Result as CoreResult}; use prost::Message; use rand::Rng; +use std::collections::HashMap; use std::time::{SystemTime, UNIX_EPOCH}; use thiserror::Error; @@ -233,7 +234,7 @@ impl Store { ) -> PersistenceResult<()> { // Serialize labels to JSON let labels_map = message.object_labels(); - let labels_json = if labels_map.as_ref().map_or(true, |m| m.is_empty()) { + let labels_json = if labels_map.as_ref().is_none_or(HashMap::is_empty) { None } else { 
Some(serde_json::to_string(&labels_map).map_err(|e| { @@ -323,19 +324,17 @@ fn map_migrate_error(error: &sqlx::migrate::MigrateError) -> PersistenceError { /// Parse a simple label selector string into key-value pairs. /// Format: "key1=value1,key2=value2" -/// Returns a HashMap of label requirements. +/// Returns a `HashMap` of label requirements. /// /// Note: Input validation should be performed at the gRPC layer using /// `grpc::validation::validate_label_selector()` before calling this function. /// Errors returned here indicate unexpected internal errors, not user input errors. -pub fn parse_label_selector( - selector: &str, -) -> PersistenceResult> { +pub fn parse_label_selector(selector: &str) -> PersistenceResult> { if selector.is_empty() { - return Ok(std::collections::HashMap::new()); + return Ok(HashMap::new()); } - let mut labels = std::collections::HashMap::new(); + let mut labels = HashMap::new(); for pair in selector.split(',') { let pair = pair.trim(); if pair.is_empty() { diff --git a/crates/openshell-server/src/persistence/postgres.rs b/crates/openshell-server/src/persistence/postgres.rs index 770f3891a..2cd6a046f 100644 --- a/crates/openshell-server/src/persistence/postgres.rs +++ b/crates/openshell-server/src/persistence/postgres.rs @@ -50,7 +50,7 @@ impl PostgresStore { ) -> PersistenceResult<()> { let now_ms = current_time_ms()?; let labels_jsonb: Option = labels - .map(|s| serde_json::from_str(s)) + .map(serde_json::from_str) .transpose() .map_err(|e| super::PersistenceError::Encode(format!("invalid labels JSON: {e}")))?; diff --git a/crates/openshell-server/src/persistence/sqlite.rs b/crates/openshell-server/src/persistence/sqlite.rs index 8fad865bb..fafb07597 100644 --- a/crates/openshell-server/src/persistence/sqlite.rs +++ b/crates/openshell-server/src/persistence/sqlite.rs @@ -204,7 +204,7 @@ LIMIT ?2 OFFSET ?3 required_labels .iter() - .all(|(key, value)| labels.get(key).map(|v| v == value).unwrap_or(false)) + .all(|(key, value)| 
labels.get(key).is_some_and(|v| v == value)) }) .skip(offset as usize) .take(limit as usize) diff --git a/crates/openshell-server/src/persistence/tests.rs b/crates/openshell-server/src/persistence/tests.rs index 6ae002e27..bef95d4b6 100644 --- a/crates/openshell-server/src/persistence/tests.rs +++ b/crates/openshell-server/src/persistence/tests.rs @@ -743,14 +743,14 @@ fn parse_label_selector_accepts_empty_value() { // Kubernetes allows empty label values, so selectors should accept "key=" format let result = super::parse_label_selector("env=").unwrap(); assert_eq!(result.len(), 1); - assert_eq!(result.get("env"), Some(&"".to_string())); + assert_eq!(result.get("env"), Some(&String::new())); } #[test] fn parse_label_selector_multiple_with_empty_value() { let result = super::parse_label_selector("env=,tier=frontend").unwrap(); assert_eq!(result.len(), 2); - assert_eq!(result.get("env"), Some(&"".to_string())); + assert_eq!(result.get("env"), Some(&String::new())); assert_eq!(result.get("tier"), Some(&"frontend".to_string())); } diff --git a/crates/openshell-server/src/sandbox_watch.rs b/crates/openshell-server/src/sandbox_watch.rs index 73cc4bf26..ac38eba8d 100644 --- a/crates/openshell-server/src/sandbox_watch.rs +++ b/crates/openshell-server/src/sandbox_watch.rs @@ -59,6 +59,16 @@ impl SandboxWatchBus { } } +/// Helper to translate broadcast lag into a gRPC status. +pub fn broadcast_to_status(err: broadcast::error::RecvError) -> Status { + match err { + broadcast::error::RecvError::Closed => Status::cancelled("stream closed"), + broadcast::error::RecvError::Lagged(n) => { + Status::resource_exhausted(format!("watch stream lagged; dropped {n} messages")) + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -105,13 +115,3 @@ mod tests { bus.remove("nonexistent"); } } - -/// Helper to translate broadcast lag into a gRPC status. 
-pub fn broadcast_to_status(err: broadcast::error::RecvError) -> Status { - match err { - broadcast::error::RecvError::Closed => Status::cancelled("stream closed"), - broadcast::error::RecvError::Lagged(n) => { - Status::resource_exhausted(format!("watch stream lagged; dropped {n} messages")) - } - } -} diff --git a/crates/openshell-server/src/ssh_tunnel.rs b/crates/openshell-server/src/ssh_tunnel.rs index 6b0232fa0..bd317d53f 100644 --- a/crates/openshell-server/src/ssh_tunnel.rs +++ b/crates/openshell-server/src/ssh_tunnel.rs @@ -106,10 +106,13 @@ async fn ssh_connect( // Check token expiry (0 means no expiry for backward compatibility). if session.expires_at_ms > 0 { - let now_ms = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap_or_default() - .as_millis() as i64; + let now_ms = i64::try_from( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_millis(), + ) + .unwrap_or(i64::MAX); if now_ms > session.expires_at_ms { return StatusCode::UNAUTHORIZED.into_response(); } @@ -268,10 +271,13 @@ pub fn spawn_session_reaper(store: Arc, interval: Duration) { } async fn reap_expired_sessions(store: &Store) -> Result<(), String> { - let now_ms = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap_or_default() - .as_millis() as i64; + let now_ms = i64::try_from( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_millis(), + ) + .unwrap_or(i64::MAX); let records = store .list(SshSession::object_type(), 1000, 0) @@ -321,7 +327,7 @@ mod tests { SshSession { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: id.to_string(), - name: format!("session-{}", id), + name: format!("session-{id}"), created_at_ms: 1000, labels: HashMap::new(), }), @@ -333,10 +339,13 @@ mod tests { } fn now_ms() -> i64 { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as i64 + 
i64::try_from( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_millis(), + ) + .unwrap_or(i64::MAX) } // ---- Connection limit tests ---- diff --git a/crates/openshell-server/src/supervisor_session.rs b/crates/openshell-server/src/supervisor_session.rs index cd250459a..94c352ba5 100644 --- a/crates/openshell-server/src/supervisor_session.rs +++ b/crates/openshell-server/src/supervisor_session.rs @@ -70,9 +70,9 @@ impl openshell_driver_docker::SupervisorReadiness for SupervisorSessionRegistry /// Registry of active supervisor sessions and pending relay channels. #[derive(Default)] pub struct SupervisorSessionRegistry { - /// sandbox_id -> live session handle. + /// `sandbox_id` -> live session handle. sessions: Mutex>, - /// channel_id -> oneshot sender for the reverse CONNECT stream. + /// `channel_id` -> oneshot sender for the reverse CONNECT stream. pending_relays: Mutex>, } @@ -287,10 +287,12 @@ impl SupervisorSessionRegistry { Ok((channel_id, relay_rx)) } - /// Claim a pending relay channel. Called by the /relay/{channel_id} HTTP handler + /// Claim a pending relay channel. Called by the `/relay/{channel_id}` HTTP handler /// when the supervisor's reverse CONNECT arrives. /// - /// Returns the DuplexStream half that the supervisor side should read/write. + /// Returns the `DuplexStream` half that the supervisor side should read/write. + // `tonic::Status` is large but is the API surface of gRPC handlers. 
+ #[allow(clippy::result_large_err)] pub fn claim_relay(&self, channel_id: &str) -> Result { let pending = { let mut map = self.pending_relays.lock().unwrap(); @@ -377,15 +379,16 @@ async fn require_persisted_sandbox( // RelayStream gRPC handler // --------------------------------------------------------------------------- -/// Size of chunks read from the gateway-side DuplexStream when forwarding +/// Size of chunks read from the gateway-side `DuplexStream` when forwarding /// bytes back to the supervisor over the gRPC response stream. const RELAY_STREAM_CHUNK_SIZE: usize = 16 * 1024; -/// Handle a RelayStream RPC from a supervisor. The first inbound `RelayFrame` -/// must carry a `RelayInit` identifying the pending relay; subsequent frames -/// carry raw bytes forward to the gateway-side waiter. Bytes flowing the other -/// way are chunked and sent as `RelayFrame::data` messages back over the -/// response stream. +/// Handle a `RelayStream` RPC from a supervisor. +/// +/// The first inbound `RelayFrame` must carry a `RelayInit` identifying the +/// pending relay; subsequent frames carry raw bytes forward to the +/// gateway-side waiter. Bytes flowing the other way are chunked and sent as +/// `RelayFrame::data` messages back over the response stream. pub async fn handle_relay_stream( registry: &SupervisorSessionRegistry, request: Request>, @@ -456,7 +459,7 @@ pub async fn handle_relay_stream( // Gateway → supervisor: read the DuplexStream and emit RelayFrame::data messages. 
let (out_tx, out_rx) = mpsc::channel::>(16); - let channel_id_out = channel_id.clone(); + let channel_id_out = channel_id; tokio::spawn(async move { let mut buf = vec![0u8; RELAY_STREAM_CHUNK_SIZE]; loop { @@ -735,8 +738,8 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: id.to_string(), name: name.to_string(), - created_at_ms: 1000000, - labels: std::collections::HashMap::new(), + created_at_ms: 1_000_000, + labels: HashMap::new(), }), ..Default::default() } @@ -1002,6 +1005,8 @@ mod tests { #[tokio::test] async fn open_relay_uses_newest_session_after_supersede() { + use tokio::sync::mpsc::error::TryRecvError; + let registry = SupervisorSessionRegistry::new(); let (tx_old, mut rx_old) = mpsc::channel::(4); let (tx_new, mut rx_new) = mpsc::channel(4); @@ -1040,7 +1045,6 @@ mod tests { // The old session must have received no messages — the channel is // still open but empty. - use tokio::sync::mpsc::error::TryRecvError; match rx_old.try_recv() { Err(TryRecvError::Empty) => {} other => panic!("expected Empty on superseded session, got {other:?}"), @@ -1191,7 +1195,9 @@ mod tests { PendingRelay { sender: relay_tx, sandbox_id: "sbx-test".to_string(), - created_at: Instant::now() - Duration::from_secs(60), + created_at: Instant::now() + .checked_sub(Duration::from_secs(60)) + .expect("test instant subtraction underflow"), }, ); @@ -1269,7 +1275,9 @@ mod tests { PendingRelay { sender: relay_tx, sandbox_id: "sbx-test".to_string(), - created_at: Instant::now() - Duration::from_secs(60), + created_at: Instant::now() + .checked_sub(Duration::from_secs(60)) + .expect("test instant subtraction underflow"), }, ); diff --git a/crates/openshell-server/tests/edge_tunnel_auth.rs b/crates/openshell-server/tests/edge_tunnel_auth.rs index e8c7e0038..15df2f9d8 100644 --- a/crates/openshell-server/tests/edge_tunnel_auth.rs +++ b/crates/openshell-server/tests/edge_tunnel_auth.rs @@ -12,7 +12,7 @@ //! //! Test matrix: //! -//! 
| allow_unauthenticated | client cert | bearer auth header | expected | +//! | `allow_unauthenticated` | client cert | bearer auth header | expected | //! |-----------------------|-------------|--------------------|----------| //! | false | valid | — | OK | //! | false | none | — | rejected | @@ -69,7 +69,7 @@ fn install_rustls_provider() { let _ = rustls::crypto::ring::default_provider().install_default(); } -/// Minimal OpenShell implementation for testing. +/// Minimal `OpenShell` implementation for testing. #[derive(Clone, Default)] struct TestOpenShell; @@ -317,15 +317,13 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } @@ -575,7 +573,7 @@ fn https_client_no_cert( // Tests // =========================================================================== -/// Baseline: with allow_unauthenticated=false (default), mTLS connections work. +/// Baseline: with `allow_unauthenticated=false` (default), mTLS connections work. #[tokio::test] async fn baseline_mtls_works_with_mandatory_client_certs() { install_rustls_provider(); @@ -615,7 +613,7 @@ async fn baseline_mtls_works_with_mandatory_client_certs() { server.abort(); } -/// Baseline: with allow_unauthenticated=false, no-client-cert connections are +/// Baseline: with `allow_unauthenticated=false`, no-client-cert connections are /// rejected at the TLS layer. #[tokio::test] async fn baseline_no_cert_rejected_with_mandatory_mtls() { @@ -655,7 +653,7 @@ async fn baseline_no_cert_rejected_with_mandatory_mtls() { server.abort(); } -/// With allow_unauthenticated=true, mTLS connections still work (dual-auth). 
+/// With `allow_unauthenticated=true`, mTLS connections still work (dual-auth). #[tokio::test] async fn dual_auth_mtls_still_accepted() { install_rustls_provider(); @@ -695,7 +693,7 @@ async fn dual_auth_mtls_still_accepted() { server.abort(); } -/// With allow_unauthenticated=true, no-client-cert connections pass the TLS +/// With `allow_unauthenticated=true`, no-client-cert connections pass the TLS /// handshake. This simulates Cloudflare Tunnel re-originating a connection. /// /// The gRPC health check succeeds because there is no auth middleware yet — @@ -777,7 +775,7 @@ async fn tunnel_mode_cf_authorization_header_reaches_server() { server.abort(); } -/// With allow_unauthenticated=true, a client cert from a rogue CA is still +/// With `allow_unauthenticated=true`, a client cert from a rogue CA is still /// rejected by the TLS layer — the verifier still validates presented certs. #[tokio::test] async fn tunnel_mode_rogue_cert_still_rejected() { diff --git a/crates/openshell-server/tests/multiplex_integration.rs b/crates/openshell-server/tests/multiplex_integration.rs index 561ea2ba7..dd14c63ec 100644 --- a/crates/openshell-server/tests/multiplex_integration.rs +++ b/crates/openshell-server/tests/multiplex_integration.rs @@ -285,15 +285,13 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } diff --git a/crates/openshell-server/tests/multiplex_tls_integration.rs b/crates/openshell-server/tests/multiplex_tls_integration.rs index dc51e6118..83ba76988 100644 --- a/crates/openshell-server/tests/multiplex_tls_integration.rs +++ 
b/crates/openshell-server/tests/multiplex_tls_integration.rs @@ -298,15 +298,13 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } diff --git a/crates/openshell-server/tests/ws_tunnel_integration.rs b/crates/openshell-server/tests/ws_tunnel_integration.rs index 584f09281..949c0200a 100644 --- a/crates/openshell-server/tests/ws_tunnel_integration.rs +++ b/crates/openshell-server/tests/ws_tunnel_integration.rs @@ -311,15 +311,13 @@ impl OpenShell for TestOpenShell { Err(Status::unimplemented("not implemented in test")) } - type RelayStreamStream = tokio_stream::wrappers::ReceiverStream< - Result, - >; + type RelayStreamStream = ReceiverStream>; async fn relay_stream( &self, _request: tonic::Request>, - ) -> Result, tonic::Status> { - Err(tonic::Status::unimplemented("not implemented in test")) + ) -> Result, Status> { + Err(Status::unimplemented("not implemented in test")) } } @@ -440,9 +438,8 @@ where break; } } - Ok(Message::Close(_)) => break, + Ok(Message::Close(_)) | Err(_) => break, Ok(Message::Ping(_) | Message::Pong(_)) => {} - Err(_) => break, } } let _ = writer.shutdown().await; diff --git a/crates/openshell-tui/src/app.rs b/crates/openshell-tui/src/app.rs index f397b1809..2247581b3 100644 --- a/crates/openshell-tui/src/app.rs +++ b/crates/openshell-tui/src/app.rs @@ -359,7 +359,7 @@ pub struct CreateProviderForm { pub is_generic: bool, /// Status message (errors, validation). pub status: Option, - /// Warning shown at top of EnterKey modal (e.g. autodetect failure). + /// Warning shown at top of `EnterKey` modal (e.g. autodetect failure). 
pub warning: Option, /// Animation start time. pub anim_start: Option, @@ -578,6 +578,7 @@ pub fn format_labels(labels: &HashMap) -> String { } impl App { + #[allow(clippy::large_types_passed_by_value)] // Theme is Copy; one-shot ctor pub fn new( client: OpenShellClient, gateway_name: String, @@ -719,18 +720,18 @@ impl App { self.sandbox_settings = settings::REGISTERED_SETTINGS .iter() .map(|reg| { - let (value, scope) = settings - .get(reg.key) - .map(|es| { - let v = es.value.as_ref().and_then(|sv| sv.value.clone()); - let s = match es.scope { - 1 => SettingScope::Sandbox, - 2 => SettingScope::Global, - _ => SettingScope::Unset, - }; - (v, s) - }) - .unwrap_or((None, SettingScope::Unset)); + let (value, scope) = + settings + .get(reg.key) + .map_or((None, SettingScope::Unset), |es| { + let v = es.value.as_ref().and_then(|sv| sv.value.clone()); + let s = match es.scope { + 1 => SettingScope::Sandbox, + 2 => SettingScope::Global, + _ => SettingScope::Unset, + }; + (v, s) + }); SandboxSettingEntry { key: reg.key.to_string(), kind: reg.kind, @@ -1655,7 +1656,8 @@ impl App { self.sandbox_log_scroll = self.sandbox_log_scroll.saturating_sub(delta.unsigned_abs()); self.log_autoscroll = false; } else { - self.sandbox_log_scroll = (self.sandbox_log_scroll + delta as usize).min(max_scroll); + self.sandbox_log_scroll = + (self.sandbox_log_scroll + delta.cast_unsigned()).min(max_scroll); } let visible = filtered_len .saturating_sub(self.sandbox_log_scroll) @@ -1870,7 +1872,7 @@ impl App { form.status = None; form.warning = None; } - KeyCode::Char('j') | KeyCode::Down | KeyCode::Char('k') | KeyCode::Up => { + KeyCode::Char('j' | 'k') | KeyCode::Down | KeyCode::Up => { form.method_cursor = 1 - form.method_cursor; } KeyCode::Enter => { @@ -2076,7 +2078,7 @@ impl App { registry .credential_env_vars(&ptype) .first() - .map_or(String::new(), |s| s.to_string()) + .map_or(String::new(), ToString::to_string) } else { cred_key }; diff --git a/crates/openshell-tui/src/event.rs 
b/crates/openshell-tui/src/event.rs index e73862eb8..66f9dd962 100644 --- a/crates/openshell-tui/src/event.rs +++ b/crates/openshell-tui/src/event.rs @@ -56,7 +56,7 @@ pub enum Event { pub struct EventHandler { rx: mpsc::UnboundedReceiver, // Kept alive so the spawned task's `tx` doesn't see a closed channel. - _keepalive: mpsc::UnboundedSender, + keepalive: mpsc::UnboundedSender, /// When true, the background reader stops polling stdin. paused: Arc, } @@ -113,7 +113,7 @@ impl EventHandler { Self { rx, - _keepalive: keepalive, + keepalive, paused, } } @@ -124,7 +124,7 @@ impl EventHandler { /// Get a sender handle for dispatching events from background tasks. pub fn sender(&self) -> mpsc::UnboundedSender { - self._keepalive.clone() + self.keepalive.clone() } /// Pause stdin polling (call before suspending TUI for a child process). diff --git a/crates/openshell-tui/src/lib.rs b/crates/openshell-tui/src/lib.rs index 0a8caf675..8571ebbe1 100644 --- a/crates/openshell-tui/src/lib.rs +++ b/crates/openshell-tui/src/lib.rs @@ -7,6 +7,7 @@ mod event; pub mod theme; mod ui; +use std::collections::HashMap; use std::io; use std::path::PathBuf; use std::time::Duration; @@ -33,9 +34,9 @@ const SPLASH_DURATION: Duration = Duration::from_secs(3); // Re-export for use by the CLI crate. pub use theme::ThemeMode; -/// Launch the OpenShell TUI. +/// Launch the `OpenShell` TUI. /// -/// `channel` must be a connected gRPC channel to the OpenShell gateway. +/// `channel` must be a connected gRPC channel to the `OpenShell` gateway. /// `theme_mode` selects the color theme: `Auto` detects the terminal /// background, `Dark`/`Light` forces a specific palette. 
pub async fn run( @@ -188,11 +189,11 @@ pub async fn run( .next() .cloned() .unwrap_or_default(); - let masked = if let Some(val) = provider.credentials.values().next() { - mask_secret(val) - } else { - "-".to_string() - }; + let masked = provider + .credentials + .values() + .next() + .map_or_else(|| "-".to_string(), |val| mask_secret(val)); app.provider_detail = Some(app::ProviderDetailView { name: provider.object_name().to_string(), provider_type: provider.r#type.clone(), @@ -313,12 +314,11 @@ pub async fn run( }, Some(Event::Tick) => { // Auto-dismiss splash after SPLASH_DURATION. - if app.screen == Screen::Splash { - if let Some(start) = app.splash_start { - if start.elapsed() >= SPLASH_DURATION { - app.dismiss_splash(); - } - } + if app.screen == Screen::Splash + && let Some(start) = app.splash_start + && start.elapsed() >= SPLASH_DURATION + { + app.dismiss_splash(); } refresh_gateway_list(&mut app); @@ -338,90 +338,90 @@ pub async fn run( } Some(Event::Redraw) => { // Check if a buffered sandbox CreateResult is ready to finalize. - if let Some(form) = app.create_form.as_ref() { - if form.create_result.is_some() { - let elapsed = form - .anim_start - .map_or(app::MIN_CREATING_DISPLAY, |s| s.elapsed()); - if elapsed >= app::MIN_CREATING_DISPLAY { - let result = app - .create_form - .as_mut() - .and_then(|f| f.create_result.take()); - if let Some(h) = app.anim_handle.take() { - h.abort(); - } - match result { - Some(Ok(name)) => { - app.create_form = None; - let ports = std::mem::take(&mut app.pending_forward_ports); - let command = std::mem::take(&mut app.pending_exec_command); - let port_info = if ports.is_empty() { - String::new() - } else { - let list = ports - .iter() - .map(|p| p.to_string()) - .collect::>() - .join(", "); - format!(" (forwarding port(s) {list})") - }; - app.status_text = format!("Created sandbox: {name}{port_info}"); - refresh_sandboxes(&mut app).await; - - // If a command was specified, suspend TUI and exec it. 
- if !command.is_empty() { - handle_exec_command( - &mut app, - &mut terminal, - &events, - &name, - &command, - ) - .await; - } + if let Some(form) = app.create_form.as_ref() + && form.create_result.is_some() + { + let elapsed = form + .anim_start + .map_or(app::MIN_CREATING_DISPLAY, |s| s.elapsed()); + if elapsed >= app::MIN_CREATING_DISPLAY { + let result = app + .create_form + .as_mut() + .and_then(|f| f.create_result.take()); + if let Some(h) = app.anim_handle.take() { + h.abort(); + } + match result { + Some(Ok(name)) => { + app.create_form = None; + let ports = std::mem::take(&mut app.pending_forward_ports); + let command = std::mem::take(&mut app.pending_exec_command); + let port_info = if ports.is_empty() { + String::new() + } else { + let list = ports + .iter() + .map(ToString::to_string) + .collect::>() + .join(", "); + format!(" (forwarding port(s) {list})") + }; + app.status_text = format!("Created sandbox: {name}{port_info}"); + refresh_sandboxes(&mut app).await; + + // If a command was specified, suspend TUI and exec it. + if !command.is_empty() { + handle_exec_command( + &mut app, + &mut terminal, + &events, + &name, + &command, + ) + .await; } - Some(Err(msg)) => { - if let Some(form) = app.create_form.as_mut() { - form.phase = app::CreatePhase::Form; - form.anim_start = None; - form.status = Some(format!("Create failed: {msg}")); - } + } + Some(Err(msg)) => { + if let Some(form) = app.create_form.as_mut() { + form.phase = app::CreatePhase::Form; + form.anim_start = None; + form.status = Some(format!("Create failed: {msg}")); } - None => {} } + None => {} } } } // Check if a buffered provider CreateResult is ready to finalize. 
- if let Some(form) = app.create_provider_form.as_ref() { - if form.create_result.is_some() { - let elapsed = form - .anim_start - .map_or(app::MIN_CREATING_DISPLAY, |s| s.elapsed()); - if elapsed >= app::MIN_CREATING_DISPLAY { - let result = app - .create_provider_form - .as_mut() - .and_then(|f| f.create_result.take()); - if let Some(h) = app.anim_handle.take() { - h.abort(); + if let Some(form) = app.create_provider_form.as_ref() + && form.create_result.is_some() + { + let elapsed = form + .anim_start + .map_or(app::MIN_CREATING_DISPLAY, |s| s.elapsed()); + if elapsed >= app::MIN_CREATING_DISPLAY { + let result = app + .create_provider_form + .as_mut() + .and_then(|f| f.create_result.take()); + if let Some(h) = app.anim_handle.take() { + h.abort(); + } + match result { + Some(Ok(name)) => { + app.create_provider_form = None; + app.status_text = format!("Created provider: {name}"); + refresh_providers(&mut app).await; } - match result { - Some(Ok(name)) => { - app.create_provider_form = None; - app.status_text = format!("Created provider: {name}"); - refresh_providers(&mut app).await; - } - Some(Err(msg)) => { - if let Some(form) = app.create_provider_form.as_mut() { - form.phase = app::CreateProviderPhase::EnterKey; - form.anim_start = None; - form.status = Some(format!("Create failed: {msg}")); - } + Some(Err(msg)) => { + if let Some(form) = app.create_provider_form.as_mut() { + form.phase = app::CreateProviderPhase::EnterKey; + form.anim_start = None; + form.status = Some(format!("Create failed: {msg}")); } - None => {} } + None => {} } } } @@ -594,7 +594,7 @@ fn spawn_log_stream(app: &mut App, tx: mpsc::UnboundedSender) { source: String::new(), target: String::new(), message: format!("Failed to fetch logs: {}", e.message()), - fields: Default::default(), + fields: HashMap::default(), }])); return; } @@ -605,7 +605,7 @@ fn spawn_log_stream(app: &mut App, tx: mpsc::UnboundedSender) { source: String::new(), target: String::new(), message: "Timed out fetching 
logs.".into(), - fields: Default::default(), + fields: HashMap::default(), }])); return; } @@ -621,24 +621,20 @@ fn spawn_log_stream(app: &mut App, tx: mpsc::UnboundedSender) { ..Default::default() }; - let resp = - match tokio::time::timeout(Duration::from_secs(5), client.watch_sandbox(req)).await { - Ok(Ok(r)) => r, - Ok(Err(_)) | Err(_) => return, // Silently stop — user can re-enter logs. - }; + // Silently stop — user can re-enter logs. + let Ok(Ok(resp)) = + tokio::time::timeout(Duration::from_secs(5), client.watch_sandbox(req)).await + else { + return; + }; let mut stream = resp.into_inner(); - loop { - match stream.message().await { - Ok(Some(event)) => { - if let Some(openshell_core::proto::sandbox_stream_event::Payload::Log(log)) = - event.payload - { - let line = proto_to_log_line(log); - let _ = tx.send(Event::LogLines(vec![line])); - } - } - _ => break, // Stream ended or error. + while let Ok(Some(event)) = stream.message().await { + if let Some(openshell_core::proto::sandbox_stream_event::Payload::Log(log)) = + event.payload + { + let line = proto_to_log_line(log); + let _ = tx.send(Event::LogLines(vec![line])); } } }); @@ -717,7 +713,7 @@ async fn fetch_sandbox_detail(app: &mut App) { Ok(Ok(resp)) => { if let Some(sandbox) = resp.into_inner().sandbox { if let Some(spec) = &sandbox.spec { - app.sandbox_providers_list = spec.providers.clone(); + app.sandbox_providers_list.clone_from(&spec.providers); } let id = sandbox.object_id().to_string(); if id.is_empty() { None } else { Some(id) } @@ -796,13 +792,14 @@ async fn handle_shell_connect( name: sandbox_name.clone(), }; match tokio::time::timeout(Duration::from_secs(5), app.client.get_sandbox(req)).await { - Ok(Ok(resp)) => match resp.into_inner().sandbox { - Some(s) => s.object_id().to_string(), - None => { + Ok(Ok(resp)) => { + if let Some(s) = resp.into_inner().sandbox { + s.object_id().to_string() + } else { app.status_text = "sandbox not found".to_string(); return; } - }, + } Ok(Err(e)) => { 
app.status_text = format!("failed to get sandbox: {}", e.message()); return; @@ -945,13 +942,14 @@ async fn handle_exec_command( name: sandbox_name.to_string(), }; match tokio::time::timeout(Duration::from_secs(5), app.client.get_sandbox(req)).await { - Ok(Ok(resp)) => match resp.into_inner().sandbox { - Some(s) => s.object_id().to_string(), - None => { + Ok(Ok(resp)) => { + if let Some(s) = resp.into_inner().sandbox { + s.object_id().to_string() + } else { app.status_text = format!("exec: sandbox {sandbox_name} not found"); return; } - }, + } Ok(Err(e)) => { app.status_text = format!("exec: failed to get sandbox: {}", e.message()); return; @@ -1015,7 +1013,7 @@ async fn handle_exec_command( // remote shell parses it correctly. let command_str = command .split_whitespace() - .map(|word| shell_escape(word)) + .map(shell_escape) .collect::>() .join(" "); let mut ssh = std::process::Command::new("ssh"); @@ -1207,7 +1205,7 @@ fn render_policy_lines( }; lines.push(Line::from(vec![ Span::styled(" Allow: ", t.muted), - Span::styled(format!("{:<6} {}", method, target), t.text), + Span::styled(format!("{method:<6} {target}"), t.text), ])); } } @@ -1275,7 +1273,7 @@ fn spawn_create_sandbox(app: &mut App, tx: mpsc::UnboundedSender) { // Stash command so we can exec after sandbox creation + Ready. app.pending_exec_command = command; // Stash ports so we can include them in the status text. 
- app.pending_forward_ports = ports.clone(); + app.pending_forward_ports.clone_from(&ports); let endpoint = app.endpoint.clone(); let gateway_name = app.gateway_name.clone(); @@ -1311,7 +1309,7 @@ fn spawn_create_sandbox(app: &mut App, tx: mpsc::UnboundedSender) { policy, ..Default::default() }), - labels: std::collections::HashMap::new(), + labels: HashMap::new(), }; let sandbox_name = @@ -1353,21 +1351,19 @@ fn spawn_create_sandbox(app: &mut App, tx: mpsc::UnboundedSender) { let req = openshell_core::proto::GetSandboxRequest { name: sandbox_name.clone(), }; - match client.get_sandbox(req).await { - Ok(resp) => { - if let Some(sandbox) = resp.into_inner().sandbox { - if sandbox.phase == 2 { - break sandbox.object_id().to_string(); - } - if sandbox.phase == 3 { - let _ = tx.send(Event::CreateResult(Err( - "sandbox entered error state".to_string(), - ))); - return; - } - } + // Retry on transient errors. + if let Ok(resp) = client.get_sandbox(req).await + && let Some(sandbox) = resp.into_inner().sandbox + { + if sandbox.phase == 2 { + break sandbox.object_id().to_string(); + } + if sandbox.phase == 3 { + let _ = tx.send(Event::CreateResult(Err( + "sandbox entered error state".to_string() + ))); + return; } - Err(_) => {} // Retry on transient errors. 
} }; @@ -1568,11 +1564,11 @@ fn spawn_create_provider(app: &App, tx: mpsc::UnboundedSender) { id: String::new(), name: provider_name.clone(), created_at_ms: 0, - labels: std::collections::HashMap::new(), + labels: HashMap::new(), }), r#type: ptype.clone(), credentials: credentials.clone(), - config: Default::default(), + config: HashMap::default(), }), }; @@ -1649,7 +1645,7 @@ fn spawn_update_provider(app: &App, tx: mpsc::UnboundedSender) { let new_value = form.new_value.clone(); tokio::spawn(async move { - let mut credentials = std::collections::HashMap::new(); + let mut credentials = HashMap::new(); credentials.insert(cred_key, new_value); let req = openshell_core::proto::UpdateProviderRequest { @@ -1658,11 +1654,11 @@ fn spawn_update_provider(app: &App, tx: mpsc::UnboundedSender) { id: String::new(), name: name.clone(), created_at_ms: 0, - labels: std::collections::HashMap::new(), + labels: HashMap::new(), }), r#type: ptype, credentials, - config: Default::default(), + config: HashMap::default(), }), }; @@ -1970,25 +1966,25 @@ fn spawn_set_global_setting(app: &App, tx: mpsc::UnboundedSender) { let value = match kind { openshell_core::settings::SettingValueKind::Bool => { - match openshell_core::settings::parse_bool_like(&raw) { - Some(v) => setting_value::Value::BoolValue(v), - None => { - let _ = tx.send(Event::GlobalSettingSetResult(Err(format!( - "invalid bool value: {raw}" - )))); - return; - } + if let Some(v) = openshell_core::settings::parse_bool_like(&raw) { + setting_value::Value::BoolValue(v) + } else { + let _ = tx.send(Event::GlobalSettingSetResult(Err(format!( + "invalid bool value: {raw}" + )))); + return; } } - openshell_core::settings::SettingValueKind::Int => match raw.parse::() { - Ok(v) => setting_value::Value::IntValue(v), - Err(_) => { + openshell_core::settings::SettingValueKind::Int => { + if let Ok(v) = raw.parse::() { + setting_value::Value::IntValue(v) + } else { let _ = tx.send(Event::GlobalSettingSetResult(Err(format!( "invalid int 
value: {raw}" )))); return; } - }, + } openshell_core::settings::SettingValueKind::String => { setting_value::Value::StringValue(raw) } @@ -2074,25 +2070,25 @@ fn spawn_set_sandbox_setting(app: &App, tx: mpsc::UnboundedSender) { let value = match kind { openshell_core::settings::SettingValueKind::Bool => { - match openshell_core::settings::parse_bool_like(&raw) { - Some(v) => setting_value::Value::BoolValue(v), - None => { - let _ = tx.send(Event::SandboxSettingSetResult(Err(format!( - "invalid bool value: {raw}" - )))); - return; - } + if let Some(v) = openshell_core::settings::parse_bool_like(&raw) { + setting_value::Value::BoolValue(v) + } else { + let _ = tx.send(Event::SandboxSettingSetResult(Err(format!( + "invalid bool value: {raw}" + )))); + return; } } - openshell_core::settings::SettingValueKind::Int => match raw.parse::() { - Ok(v) => setting_value::Value::IntValue(v), - Err(_) => { + openshell_core::settings::SettingValueKind::Int => { + if let Ok(v) = raw.parse::() { + setting_value::Value::IntValue(v) + } else { let _ = tx.send(Event::SandboxSettingSetResult(Err(format!( "invalid int value: {raw}" )))); return; } - }, + } openshell_core::settings::SettingValueKind::String => { setting_value::Value::StringValue(raw) } @@ -2225,8 +2221,7 @@ async fn refresh_sandboxes(app: &mut App) { .map(|s| { s.metadata .as_ref() - .map(|m| format_age(m.created_at_ms)) - .unwrap_or_else(|| "?".to_string()) + .map_or_else(|| "?".to_string(), |m| format_age(m.created_at_ms)) }) .collect(); app.sandbox_created = sandboxes @@ -2234,8 +2229,7 @@ async fn refresh_sandboxes(app: &mut App) { .map(|s| { s.metadata .as_ref() - .map(|m| format_timestamp(m.created_at_ms)) - .unwrap_or_else(|| "?".to_string()) + .map_or_else(|| "?".to_string(), |m| format_timestamp(m.created_at_ms)) }) .collect(); @@ -2258,7 +2252,7 @@ async fn refresh_sandboxes(app: &mut App) { .map(|s| { s.object_labels() .as_ref() - .map(|labels| app::format_labels(labels)) + .map(app::format_labels) 
.unwrap_or_default() }) .collect(); diff --git a/crates/openshell-tui/src/theme.rs b/crates/openshell-tui/src/theme.rs index 1be202367..0c0d9b37c 100644 --- a/crates/openshell-tui/src/theme.rs +++ b/crates/openshell-tui/src/theme.rs @@ -213,7 +213,7 @@ pub fn detect(mode: ThemeMode) -> Theme { /// /// Uses `terminal-colorsaurus` to send an OSC 11 query to the terminal, /// which returns the actual background RGB color. This works reliably on -/// iTerm2, Terminal.app, WezTerm, Alacritty, and most modern terminals. +/// iTerm2, Terminal.app, `WezTerm`, Alacritty, and most modern terminals. /// /// Falls back to `false` (dark) if the terminal doesn't respond to the /// query (e.g. `TERM=dumb`, piped output, very old terminals). diff --git a/crates/openshell-tui/src/ui/create_provider.rs b/crates/openshell-tui/src/ui/create_provider.rs index c099ea6a5..9f1cc6d83 100644 --- a/crates/openshell-tui/src/ui/create_provider.rs +++ b/crates/openshell-tui/src/ui/create_provider.rs @@ -313,7 +313,6 @@ fn draw_enter_key( chunks[idx], t, ); - idx += 1; } else { // Credential rows — env var name + masked value on the same line. let max_name_len = form @@ -330,7 +329,7 @@ fn draw_enter_key( .map(|(i, (env_name, value))| { let is_focused = form.key_field == ProviderKeyField::Credential && i == form.cred_cursor; - let padded = format!("{:width$}", env_name, width = max_name_len); + let padded = format!("{env_name:max_name_len$}"); let name_style = if is_focused { t.accent_bold } else { t.text }; let mut spans = vec![Span::styled(format!(" {padded}: "), name_style)]; if value.is_empty() { @@ -353,8 +352,8 @@ fn draw_enter_key( }) .collect(); frame.render_widget(Paragraph::new(lines), chunks[idx]); - idx += 1; } + idx += 1; // Spacer. 
idx += 1; diff --git a/crates/openshell-tui/src/ui/create_sandbox.rs b/crates/openshell-tui/src/ui/create_sandbox.rs index c8411c618..cb90e244d 100644 --- a/crates/openshell-tui/src/ui/create_sandbox.rs +++ b/crates/openshell-tui/src/ui/create_sandbox.rs @@ -304,7 +304,7 @@ pub fn render_chase( } let frame = (elapsed_ms / 140) as usize; - let mouth_open = frame % 2 == 0; + let mouth_open = frame.is_multiple_of(2); // Characters. let pac = if mouth_open { "ᗧ" } else { "●" }; @@ -360,16 +360,14 @@ pub fn render_chase( let mut current_style = buf[0].1; for &(ch, style) in &buf { - if style == current_style { - current_str.push(ch); - } else { + if style != current_style { if !current_str.is_empty() { spans.push(Span::styled(current_str.clone(), current_style)); current_str.clear(); } current_style = style; - current_str.push(ch); } + current_str.push(ch); } if !current_str.is_empty() { spans.push(Span::styled(current_str, current_style)); diff --git a/crates/openshell-tui/src/ui/global_settings.rs b/crates/openshell-tui/src/ui/global_settings.rs index cac59b0a9..9e47fd8d6 100644 --- a/crates/openshell-tui/src/ui/global_settings.rs +++ b/crates/openshell-tui/src/ui/global_settings.rs @@ -155,7 +155,7 @@ fn draw_edit_overlay( ])); // content lines + 2 for border - let popup_height = (lines.len() + 2) as u16; + let popup_height = u16::try_from(lines.len() + 2).unwrap_or(u16::MAX); let popup = centered_rect(50, popup_height, area); frame.render_widget(Clear, popup); @@ -239,7 +239,7 @@ fn draw_confirm_delete(frame: &mut Frame<'_>, app: &App, idx: usize, area: Rect) ]; // content lines + 2 for border - let popup_height = (lines.len() + 2) as u16; + let popup_height = u16::try_from(lines.len() + 2).unwrap_or(u16::MAX); let popup = centered_rect(60, popup_height, area); frame.render_widget(Clear, popup); diff --git a/crates/openshell-tui/src/ui/mod.rs b/crates/openshell-tui/src/ui/mod.rs index b920d9cb6..13ac94c10 100644 --- a/crates/openshell-tui/src/ui/mod.rs +++ 
b/crates/openshell-tui/src/ui/mod.rs @@ -1,17 +1,17 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -pub(crate) mod create_provider; -pub(crate) mod create_sandbox; +pub mod create_provider; +pub mod create_sandbox; mod dashboard; -pub(crate) mod global_settings; -pub(crate) mod providers; -pub(crate) mod sandbox_detail; +pub mod global_settings; +pub mod providers; +pub mod sandbox_detail; mod sandbox_draft; -pub(crate) mod sandbox_logs; +pub mod sandbox_logs; mod sandbox_policy; -pub(crate) mod sandbox_settings; -pub(crate) mod sandboxes; +pub mod sandbox_settings; +pub mod sandboxes; mod splash; use ratatui::Frame; @@ -89,12 +89,12 @@ fn draw_sandbox_screen(frame: &mut Frame<'_>, app: &mut App, area: Rect) { } // Log detail popup renders over the full frame (not constrained to pane). - if app.focus == Focus::SandboxLogs { - if let Some(detail_idx) = app.log_detail_index { - let filtered: Vec<&app::LogLine> = app.filtered_log_lines(); - if let Some(log) = filtered.get(detail_idx) { - sandbox_logs::draw_detail_popup(frame, log, frame.size(), &app.theme); - } + if app.focus == Focus::SandboxLogs + && let Some(detail_idx) = app.log_detail_index + { + let filtered: Vec<&app::LogLine> = app.filtered_log_lines(); + if let Some(log) = filtered.get(detail_idx) { + sandbox_logs::draw_detail_popup(frame, log, frame.size(), &app.theme); } } @@ -328,8 +328,7 @@ fn draw_nav_bar(frame: &mut Frame<'_>, app: &App, area: Rect) { let selected_status = app .draft_chunks .get(app.draft_scroll + app.draft_selected) - .map(|c| c.status.as_str()) - .unwrap_or(""); + .map_or("", |c| c.status.as_str()); let mut spans = vec![ Span::styled(" ", t.text), Span::styled("[j/k]", t.key_hint), @@ -456,7 +455,7 @@ fn draw_command_bar(frame: &mut Frame<'_>, app: &App, area: Rect) { /// Center a popup rectangle within `area` using percentage-based width and /// an absolute height (in rows). 
-pub(crate) fn centered_popup(percent_x: u16, height: u16, area: Rect) -> Rect { +pub fn centered_popup(percent_x: u16, height: u16, area: Rect) -> Rect { let vert = Layout::default() .direction(Direction::Vertical) .constraints([ diff --git a/crates/openshell-tui/src/ui/sandbox_detail.rs b/crates/openshell-tui/src/ui/sandbox_detail.rs index 7ab725e9d..7cdbec8bd 100644 --- a/crates/openshell-tui/src/ui/sandbox_detail.rs +++ b/crates/openshell-tui/src/ui/sandbox_detail.rs @@ -78,8 +78,7 @@ pub fn draw(frame: &mut Frame<'_>, app: &App, area: Rect) { .sandbox_labels .get(idx) .filter(|s| !s.is_empty()) - .map(String::as_str) - .unwrap_or("none"); + .map_or("none", String::as_str); let row3 = Line::from(vec![ Span::styled(" Labels: ", t.muted), Span::styled(labels_str, t.text), @@ -101,8 +100,7 @@ pub fn draw(frame: &mut Frame<'_>, app: &App, area: Rect) { .sandbox_notes .get(idx) .filter(|s| !s.is_empty()) - .map(String::as_str) - .unwrap_or("none"); + .map_or("none", String::as_str); let row5 = Line::from(vec![ Span::styled(" Forwards: ", t.muted), Span::styled(forwards_str, t.text), diff --git a/crates/openshell-tui/src/ui/sandbox_draft.rs b/crates/openshell-tui/src/ui/sandbox_draft.rs index 528d1c608..a9cd6b0c9 100644 --- a/crates/openshell-tui/src/ui/sandbox_draft.rs +++ b/crates/openshell-tui/src/ui/sandbox_draft.rs @@ -324,7 +324,7 @@ pub fn draw_approve_all_popup( let count = chunks.len(); // Height: header(1) + blank(1) + chunks(count, capped at 12) + blank(1) + hints(1) + borders(2) + padding(1) let list_lines = count.min(12); - let popup_height = (7 + list_lines) as u16; + let popup_height = u16::try_from(7 + list_lines).unwrap_or(u16::MAX); let popup_height = popup_height.min(area.height.saturating_sub(4)); let popup_width = (area.width * 4 / 5).min(area.width.saturating_sub(4)); let popup_area = centered_rect(popup_width, popup_height, area); diff --git a/crates/openshell-tui/src/ui/sandbox_logs.rs b/crates/openshell-tui/src/ui/sandbox_logs.rs index 
2aa793d20..da20b40c8 100644 --- a/crates/openshell-tui/src/ui/sandbox_logs.rs +++ b/crates/openshell-tui/src/ui/sandbox_logs.rs @@ -335,7 +335,7 @@ const CONNECT_FIELD_ORDER: &[&str] = &[ "reason", ]; -/// Priority field order for L7_REQUEST log lines. +/// Priority field order for `L7_REQUEST` log lines. const L7_FIELD_ORDER: &[&str] = &[ "l7_action", "l7_target", @@ -348,7 +348,7 @@ const L7_FIELD_ORDER: &[&str] = &[ ]; /// Return fields in a smart order based on the log message type. -pub(crate) fn ordered_fields<'a>(log: &'a LogLine) -> Vec<(&'a str, &'a str)> { +pub fn ordered_fields(log: &LogLine) -> Vec<(&str, &str)> { // Matches both "CONNECT" (L4-only decision) and "CONNECT_L7" (tunnel lifecycle for L7 endpoints) let order: Option<&[&str]> = if log.message.starts_with("CONNECT") { Some(CONNECT_FIELD_ORDER) @@ -358,8 +358,18 @@ pub(crate) fn ordered_fields<'a>(log: &'a LogLine) -> Vec<(&'a str, &'a str)> { None }; - match order { - Some(priority) => { + order.map_or_else( + || { + // Default: alphabetical. + let mut pairs: Vec<(&str, &str)> = log + .fields + .iter() + .map(|(k, v)| (k.as_str(), v.as_str())) + .collect(); + pairs.sort_by_key(|(k, _)| *k); + pairs + }, + |priority| { let mut result: Vec<(&str, &str)> = Vec::with_capacity(log.fields.len()); // Add priority fields first (in order). for &key in priority { @@ -377,18 +387,8 @@ pub(crate) fn ordered_fields<'a>(log: &'a LogLine) -> Vec<(&'a str, &'a str)> { remaining.sort_by_key(|(k, _)| *k); result.extend(remaining); result - } - None => { - // Default: alphabetical. 
- let mut pairs: Vec<(&str, &str)> = log - .fields - .iter() - .map(|(k, v)| (k.as_str(), v.as_str())) - .collect(); - pairs.sort_by_key(|(k, _)| *k); - pairs - } - } + }, + ) } // --------------------------------------------------------------------------- @@ -404,7 +404,7 @@ fn level_style(level: &str, t: &crate::theme::Theme) -> ratatui::style::Style { } } -pub(crate) fn format_short_time(epoch_ms: i64) -> String { +pub fn format_short_time(epoch_ms: i64) -> String { if epoch_ms <= 0 { return String::from("--:--:--"); } @@ -420,7 +420,7 @@ pub(crate) fn format_short_time(epoch_ms: i64) -> String { /// /// Produces the same layout as `render_log_line()` but without styles or /// truncation: `HH:MM:SS {source:<7} {level:<5} {message} [key=value ...]` -pub(crate) fn format_log_line_plain(log: &LogLine) -> String { +pub fn format_log_line_plain(log: &LogLine) -> String { let ts = format_short_time(log.timestamp_ms); let mut s = format!("{ts} {:<7} {:<5} {}", log.source, log.level, log.message); diff --git a/crates/openshell-tui/src/ui/sandbox_settings.rs b/crates/openshell-tui/src/ui/sandbox_settings.rs index c26f4a663..8ac5eb687 100644 --- a/crates/openshell-tui/src/ui/sandbox_settings.rs +++ b/crates/openshell-tui/src/ui/sandbox_settings.rs @@ -163,7 +163,7 @@ fn draw_edit_overlay( Span::styled(" Cancel", t.muted), ])); - let popup_height = (lines.len() + 2) as u16; + let popup_height = u16::try_from(lines.len() + 2).unwrap_or(u16::MAX); let popup = centered_rect(50, popup_height, area); frame.render_widget(Clear, popup); @@ -207,7 +207,7 @@ fn draw_confirm_set(frame: &mut Frame<'_>, app: &App, idx: usize, area: Rect) { ]), ]; - let popup_height = (lines.len() + 2) as u16; + let popup_height = u16::try_from(lines.len() + 2).unwrap_or(u16::MAX); let popup = centered_rect(60, popup_height, area); frame.render_widget(Clear, popup); @@ -245,7 +245,7 @@ fn draw_confirm_delete(frame: &mut Frame<'_>, app: &App, idx: usize, area: Rect) ]), ]; - let popup_height = 
(lines.len() + 2) as u16; + let popup_height = u16::try_from(lines.len() + 2).unwrap_or(u16::MAX); let popup = centered_rect(55, popup_height, area); frame.render_widget(Clear, popup); diff --git a/crates/openshell-vm/build.rs b/crates/openshell-vm/build.rs index 33fab9a78..f448ed0bc 100644 --- a/crates/openshell-vm/build.rs +++ b/crates/openshell-vm/build.rs @@ -12,7 +12,7 @@ //! Environment: //! `OPENSHELL_VM_RUNTIME_COMPRESSED_DIR` - Path to compressed artifacts -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::{env, fs}; fn main() { @@ -116,7 +116,7 @@ fn main() { /// Generate stub (empty) resource files so the build can complete. /// The embedded module will fail at runtime if these stubs are used. -fn generate_stub_resources(out_dir: &PathBuf) { +fn generate_stub_resources(out_dir: &Path) { let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap_or_default(); let (libkrun_name, libkrunfw_name) = match target_os.as_str() { diff --git a/crates/openshell-vm/src/embedded.rs b/crates/openshell-vm/src/embedded.rs index 731f34b10..f019385d6 100644 --- a/crates/openshell-vm/src/embedded.rs +++ b/crates/openshell-vm/src/embedded.rs @@ -229,7 +229,8 @@ pub fn cleanup_old_rootfs() -> Result<(), VmError> { } let current_version_dir = base.join(VERSION); - cleanup_old_versions_in_base(&base, ¤t_version_dir) + cleanup_old_versions_in_base(&base, ¤t_version_dir); + Ok(()) } /// Check if the rootfs is embedded (non-empty). @@ -257,12 +258,13 @@ fn runtime_cache_key() -> String { let sample = &chunk[..chunk.len().min(64)]; let mut word: u64 = 0; for (j, &b) in sample.iter().enumerate() { - word ^= (b as u64) << ((j % 8) * 8); + word ^= u64::from(b) << ((j % 8) * 8); } // Mix in resource index so identical resources don't cancel out. - fp ^= word.rotate_left((i as u32) * 13 + 7); + let i_u32 = u32::try_from(i).unwrap_or(u32::MAX); + fp ^= word.rotate_left(i_u32 * 13 + 7); // Also mix in the total length so size changes are detected. 
- fp ^= (chunk.len() as u64).rotate_left((i as u32) * 17 + 3); + fp ^= (chunk.len() as u64).rotate_left(i_u32 * 17 + 3); } format!("{VERSION}-{fp:016x}") } @@ -286,17 +288,17 @@ fn rootfs_cache_base() -> Result { } fn cleanup_old_versions(current_dir: &Path) -> Result<(), VmError> { - cleanup_old_versions_in_base(&runtime_cache_base()?, current_dir) + cleanup_old_versions_in_base(&runtime_cache_base()?, current_dir); + Ok(()) } -fn cleanup_old_versions_in_base(base: &Path, current_dir: &Path) -> Result<(), VmError> { +fn cleanup_old_versions_in_base(base: &Path, current_dir: &Path) { if !base.exists() { - return Ok(()); + return; } - let entries = match fs::read_dir(base) { - Ok(e) => e, - Err(_) => return Ok(()), // Can't read, skip cleanup + let Ok(entries) = fs::read_dir(base) else { + return; // Can't read, skip cleanup }; for entry in entries.filter_map(Result::ok) { @@ -316,8 +318,6 @@ fn cleanup_old_versions_in_base(base: &Path, current_dir: &Path) -> Result<(), V } } } - - Ok(()) } fn extract_resource(compressed: &[u8], dest: &Path) -> Result<(), VmError> { diff --git a/crates/openshell-vm/src/exec.rs b/crates/openshell-vm/src/exec.rs index f3198a6be..23ab23175 100644 --- a/crates/openshell-vm/src/exec.rs +++ b/crates/openshell-vm/src/exec.rs @@ -147,25 +147,26 @@ pub fn vm_exec_socket_path(rootfs: &Path) -> PathBuf { // secure_socket_base() when the gvproxy socket dir is created; here // we just compute the path. The parent directory is created (with // permission checks) at launch time via create_dir_all. 
- let base = if let Some(xdg) = std::env::var_os("XDG_RUNTIME_DIR") { - PathBuf::from(xdg) - } else { - let mut base = PathBuf::from("/tmp"); - if !base.is_dir() { - base = std::env::temp_dir(); - } - base - }; + let base = std::env::var_os("XDG_RUNTIME_DIR").map_or_else( + || { + let mut base = PathBuf::from("/tmp"); + if !base.is_dir() { + base = std::env::temp_dir(); + } + base + }, + PathBuf::from, + ); let dir = base.join("ovm-exec"); let id = hash_path_id(rootfs); dir.join(format!("{id}.sock")) } fn hash_path_id(path: &Path) -> String { - let mut hash: u64 = 0xcbf29ce484222325; + let mut hash: u64 = 0xcbf2_9ce4_8422_2325; for byte in path.to_string_lossy().as_bytes() { hash ^= u64::from(*byte); - hash = hash.wrapping_mul(0x100000001b3); + hash = hash.wrapping_mul(0x0100_0000_01b3); } format!("{:012x}", hash & 0x0000_ffff_ffff_ffff) } @@ -345,15 +346,15 @@ pub fn reset_runtime_state(rootfs: &Path, gateway_name: &str) -> Result<(), VmEr /// This function is a no-op if `state.db` does not exist (e.g. first boot or /// after a full `--reset`). pub fn recover_corrupt_kine_db(rootfs: &Path) -> Result<(), VmError> { + // The SQLite file format begins with a 16-byte magic string. + // Reference: https://www.sqlite.org/fileformat.html#the_database_header + const SQLITE_MAGIC: &[u8] = b"SQLite format 3\x00"; + let db_path = rootfs.join("var/lib/rancher/k3s/server/db/state.db"); if !db_path.exists() { return Ok(()); // Nothing to check — first boot or post-reset. } - // The SQLite file format begins with a 16-byte magic string. - // Reference: https://www.sqlite.org/fileformat.html#the_database_header - const SQLITE_MAGIC: &[u8] = b"SQLite format 3\x00"; - // Read only the first 100 bytes (the minimum valid SQLite header size) // instead of loading the entire database into memory. 
let has_invalid_header = match File::open(&db_path).and_then(|mut f| { diff --git a/crates/openshell-vm/src/ffi.rs b/crates/openshell-vm/src/ffi.rs index 7500b1c97..66213c624 100644 --- a/crates/openshell-vm/src/ffi.rs +++ b/crates/openshell-vm/src/ffi.rs @@ -93,6 +93,7 @@ type KrunAddNetUnixstream = unsafe extern "C" fn( flags: u32, ) -> i32; +#[allow(clippy::struct_field_names)] // FFI struct mirrors libkrun's symbol naming pub struct LibKrun { pub krun_init_log: KrunInitLog, pub krun_create_ctx: KrunCreateCtx, @@ -207,7 +208,10 @@ fn preload_runtime_support_libraries(runtime_dir: &Path) -> Result, .is_some_and(|name| { #[cfg(target_os = "macos")] { - name.starts_with("libkrunfw") && name.ends_with(".dylib") + name.starts_with("libkrunfw") + && Path::new(name) + .extension() + .is_some_and(|ext| ext.eq_ignore_ascii_case("dylib")) } #[cfg(not(target_os = "macos"))] { diff --git a/crates/openshell-vm/src/health.rs b/crates/openshell-vm/src/health.rs index 096a35d1f..c24015bf1 100644 --- a/crates/openshell-vm/src/health.rs +++ b/crates/openshell-vm/src/health.rs @@ -14,8 +14,11 @@ use std::path::PathBuf; use std::time::Duration; use tonic::transport::{Certificate, ClientTlsConfig, Endpoint, Identity}; +/// CA certificate, client certificate, and client key bytes for mTLS. +type MtlsMaterials = (Vec, Vec, Vec); + /// Load mTLS materials from the gateway's cert directory. 
-fn load_mtls_materials(gateway_name: &str) -> Result<(Vec, Vec, Vec), String> { +fn load_mtls_materials(gateway_name: &str) -> Result { let home = std::env::var("HOME").map_err(|_| "HOME not set")?; let mtls_dir = PathBuf::from(home) .join(".config/openshell/gateways") diff --git a/crates/openshell-vm/src/lib.rs b/crates/openshell-vm/src/lib.rs index 2b78a7669..99f122e45 100644 --- a/crates/openshell-vm/src/lib.rs +++ b/crates/openshell-vm/src/lib.rs @@ -611,7 +611,7 @@ impl VmContext { Ok(Self { krun, - ctx_id: ctx_id as u32, + ctx_id: ctx_id.cast_unsigned(), }) } @@ -736,8 +736,8 @@ impl VmContext { } fn set_port_map(&self, port_map: &[String]) -> Result<(), VmError> { - let port_strs: Vec<&str> = port_map.iter().map(String::as_str).collect(); - let (_port_owners, port_ptrs) = c_string_array(&port_strs)?; + let port_refs: Vec<&str> = port_map.iter().map(String::as_str).collect(); + let (_port_owners, port_ptrs) = c_string_array(&port_refs)?; unsafe { check( (self.krun.krun_set_port_map)(self.ctx_id, port_ptrs.as_ptr()), @@ -773,10 +773,10 @@ impl VmContext { fn set_exec(&self, exec_path: &str, args: &[String], env: &[String]) -> Result<(), VmError> { let exec_c = CString::new(exec_path)?; - let argv_strs: Vec<&str> = args.iter().map(String::as_str).collect(); - let (_argv_owners, argv_ptrs) = c_string_array(&argv_strs)?; - let env_strs: Vec<&str> = env.iter().map(String::as_str).collect(); - let (_env_owners, env_ptrs) = c_string_array(&env_strs)?; + let argv_refs: Vec<&str> = args.iter().map(String::as_str).collect(); + let (_argv_owners, argv_ptrs) = c_string_array(&argv_refs)?; + let env_refs: Vec<&str> = env.iter().map(String::as_str).collect(); + let (_env_owners, env_ptrs) = c_string_array(&env_refs)?; unsafe { check( @@ -942,7 +942,7 @@ fn kill_stale_gvproxy_by_port(port: u16) { for line in pids.lines() { if let Ok(pid) = line.trim().parse::() { - let pid_i32 = pid as libc::pid_t; + let pid_i32 = pid.cast_signed(); if is_process_named(pid_i32, "gvproxy") 
{ kill_gvproxy_pid(pid); } @@ -951,7 +951,7 @@ fn kill_stale_gvproxy_by_port(port: u16) { } fn kill_gvproxy_pid(gvproxy_pid: u32) { - let pid_i32 = gvproxy_pid as libc::pid_t; + let pid_i32 = gvproxy_pid.cast_signed(); let is_alive = unsafe { libc::kill(pid_i32, 0) } == 0; if is_alive { // Verify the process is actually gvproxy before killing. @@ -1090,10 +1090,10 @@ fn state_disk_sync_mode() -> u32 { } fn hash_path_id(path: &Path) -> String { - let mut hash: u64 = 0xcbf29ce484222325; + let mut hash: u64 = 0xcbf2_9ce4_8422_2325; for byte in path.to_string_lossy().as_bytes() { hash ^= u64::from(*byte); - hash = hash.wrapping_mul(0x100000001b3); + hash = hash.wrapping_mul(0x0100_0000_01b3); } format!("{:012x}", hash & 0x0000_ffff_ffff_ffff) } @@ -1104,15 +1104,16 @@ fn hash_path_id(path: &Path) -> String { /// falls back to `/tmp`. After `create_dir_all`, validates the directory /// is not a symlink and is owned by the current user. fn secure_socket_base(subdir: &str) -> Result { - let base = if let Some(xdg) = std::env::var_os("XDG_RUNTIME_DIR") { - PathBuf::from(xdg) - } else { - let mut base = PathBuf::from("/tmp"); - if !base.is_dir() { - base = std::env::temp_dir(); - } - base - }; + let base = std::env::var_os("XDG_RUNTIME_DIR").map_or_else( + || { + let mut base = PathBuf::from("/tmp"); + if !base.is_dir() { + base = std::env::temp_dir(); + } + base + }, + PathBuf::from, + ); let dir = base.join(subdir); // If the path exists, verify it is not a symlink before using it. @@ -1431,29 +1432,23 @@ pub fn launch(config: &VmConfig) -> Result { // network stack to misroute or drop packets. 
let mac: [u8; 6] = [0x5a, 0x94, 0xef, 0xe4, 0x0c, 0xee]; - // COMPAT_NET_FEATURES from libkrun.h - const NET_FEATURE_CSUM: u32 = 1 << 0; - const NET_FEATURE_GUEST_CSUM: u32 = 1 << 1; - const NET_FEATURE_GUEST_TSO4: u32 = 1 << 7; - const NET_FEATURE_GUEST_UFO: u32 = 1 << 10; - const NET_FEATURE_HOST_TSO4: u32 = 1 << 11; - const NET_FEATURE_HOST_UFO: u32 = 1 << 14; - const COMPAT_NET_FEATURES: u32 = NET_FEATURE_CSUM - | NET_FEATURE_GUEST_CSUM - | NET_FEATURE_GUEST_TSO4 - | NET_FEATURE_GUEST_UFO - | NET_FEATURE_HOST_TSO4 - | NET_FEATURE_HOST_UFO; + // COMPAT_NET_FEATURES from libkrun.h: + // NET_FEATURE_CSUM (1 << 0) | NET_FEATURE_GUEST_CSUM (1 << 1) + // | NET_FEATURE_GUEST_TSO4 (1 << 7) | NET_FEATURE_GUEST_UFO (1 << 10) + // | NET_FEATURE_HOST_TSO4 (1 << 11) | NET_FEATURE_HOST_UFO (1 << 14). + let compat_net_features: u32 = + (1 << 0) | (1 << 1) | (1 << 7) | (1 << 10) | (1 << 11) | (1 << 14); // On Linux use unixstream (SOCK_STREAM) to connect to gvproxy's // QEMU listener. On macOS use unixgram (SOCK_DGRAM) with the vfkit // magic byte for the vfkit listener. #[cfg(target_os = "linux")] - vm.add_net_unixstream(&net_sock, &mac, COMPAT_NET_FEATURES)?; + vm.add_net_unixstream(&net_sock, &mac, compat_net_features)?; #[cfg(target_os = "macos")] { - const NET_FLAG_VFKIT: u32 = 1 << 0; - vm.add_net_unixgram(&net_sock, &mac, COMPAT_NET_FEATURES, NET_FLAG_VFKIT)?; + // NET_FLAG_VFKIT = 1 << 0 + let net_flag_vfkit: u32 = 1 << 0; + vm.add_net_unixgram(&net_sock, &mac, compat_net_features, net_flag_vfkit)?; } eprintln!( @@ -1761,15 +1756,11 @@ fn bootstrap_gateway(rootfs: &Path, gateway_name: &str, gateway_port: u16) -> Re // drift check and the host already has valid certs. If the agent // isn't reachable we skip silently rather than blocking boot for // 30s. 
- match fetch_pki_over_exec(&exec_socket, std::time::Duration::from_secs(5)) { - Ok(bundle) => { - if let Err(e) = sync_host_certs_if_stale(gateway_name, &bundle) { - eprintln!("Warning: cert sync check failed: {e}"); - } - } - Err(_) => { - // Expected on warm boot — exec agent not ready yet. - } + // Expected on warm boot — exec agent not ready yet. + if let Ok(bundle) = fetch_pki_over_exec(&exec_socket, std::time::Duration::from_secs(5)) + && let Err(e) = sync_host_certs_if_stale(gateway_name, &bundle) + { + eprintln!("Warning: cert sync check failed: {e}"); } eprintln!( diff --git a/crates/openshell-vm/src/main.rs b/crates/openshell-vm/src/main.rs index bb9d854b1..b2dce993e 100644 --- a/crates/openshell-vm/src/main.rs +++ b/crates/openshell-vm/src/main.rs @@ -127,31 +127,31 @@ fn main() { // is available and the variable is not already configured. #[cfg(target_os = "macos")] { - if std::env::var_os("__OPENSHELL_VM_REEXEC").is_none() { - if let Ok(runtime_dir) = openshell_vm::configured_runtime_dir() { - let needs_reexec = std::env::var_os("DYLD_LIBRARY_PATH").map_or(true, |v| { - !v.to_string_lossy() - .contains(runtime_dir.to_str().unwrap_or("")) - }); - if needs_reexec { - let mut dyld_paths = vec![runtime_dir]; - if let Some(existing) = std::env::var_os("DYLD_LIBRARY_PATH") { - dyld_paths.extend(std::env::split_paths(&existing)); - } - let joined = std::env::join_paths(&dyld_paths).expect("join DYLD_LIBRARY_PATH"); - let exe = std::env::current_exe().expect("current_exe"); - let args: Vec = std::env::args().skip(1).collect(); - let err = std::process::Command::new(exe) - .args(&args) - .env("DYLD_LIBRARY_PATH", &joined) - .env("__OPENSHELL_VM_REEXEC", "1") - .status(); - match err { - Ok(status) => std::process::exit(status.code().unwrap_or(1)), - Err(e) => { - eprintln!("Error: failed to re-exec with DYLD_LIBRARY_PATH: {e}"); - std::process::exit(1); - } + if std::env::var_os("__OPENSHELL_VM_REEXEC").is_none() + && let Ok(runtime_dir) = 
openshell_vm::configured_runtime_dir() + { + let needs_reexec = std::env::var_os("DYLD_LIBRARY_PATH").is_none_or(|v| { + !v.to_string_lossy() + .contains(runtime_dir.to_str().unwrap_or("")) + }); + if needs_reexec { + let mut dyld_paths = vec![runtime_dir]; + if let Some(existing) = std::env::var_os("DYLD_LIBRARY_PATH") { + dyld_paths.extend(std::env::split_paths(&existing)); + } + let joined = std::env::join_paths(&dyld_paths).expect("join DYLD_LIBRARY_PATH"); + let exe = std::env::current_exe().expect("current_exe"); + let args: Vec = std::env::args().skip(1).collect(); + let err = std::process::Command::new(exe) + .args(&args) + .env("DYLD_LIBRARY_PATH", &joined) + .env("__OPENSHELL_VM_REEXEC", "1") + .status(); + match err { + Ok(status) => std::process::exit(status.code().unwrap_or(1)), + Err(e) => { + eprintln!("Error: failed to re-exec with DYLD_LIBRARY_PATH: {e}"); + std::process::exit(1); } } } diff --git a/crates/openshell-vm/tests/gateway_integration.rs b/crates/openshell-vm/tests/gateway_integration.rs index 7ababb42f..fabfd74c8 100644 --- a/crates/openshell-vm/tests/gateway_integration.rs +++ b/crates/openshell-vm/tests/gateway_integration.rs @@ -62,7 +62,7 @@ fn assert_runtime_bundle_staged() { /// Boot the full `OpenShell` gateway and verify the gRPC service becomes /// reachable on port 30051. #[test] -#[ignore] // requires libkrun + rootfs +#[ignore = "requires libkrun + rootfs"] fn gateway_boots_and_service_becomes_reachable() { codesign_if_needed(); assert_runtime_bundle_staged(); @@ -87,7 +87,7 @@ fn gateway_boots_and_service_becomes_reachable() { } // Tear down regardless of result. 
- let _ = unsafe { libc::kill(child.id() as i32, libc::SIGTERM) }; + let _ = unsafe { libc::kill(child.id().cast_signed(), libc::SIGTERM) }; let _ = child.wait(); assert!( @@ -99,7 +99,7 @@ fn gateway_boots_and_service_becomes_reachable() { /// Run a trivial command inside the VM via `--exec` and verify it exits /// successfully, proving the VM boots and can execute guest processes. #[test] -#[ignore] // requires libkrun + rootfs +#[ignore = "requires libkrun + rootfs"] fn gateway_exec_runs_guest_command() { codesign_if_needed(); assert_runtime_bundle_staged(); @@ -119,7 +119,7 @@ fn gateway_exec_runs_guest_command() { /// Boot the VM, then use `openshell-vm exec` against the running instance. #[test] -#[ignore] // requires libkrun + rootfs +#[ignore = "requires libkrun + rootfs"] fn gateway_exec_attaches_to_running_vm() { codesign_if_needed(); assert_runtime_bundle_staged(); @@ -143,7 +143,7 @@ fn gateway_exec_attaches_to_running_vm() { .output() .expect("failed to run openshell-vm exec"); - let _ = unsafe { libc::kill(child.id() as i32, libc::SIGTERM) }; + let _ = unsafe { libc::kill(child.id().cast_signed(), libc::SIGTERM) }; let _ = child.wait(); assert!( diff --git a/e2e/rust/src/harness/binary.rs b/e2e/rust/src/harness/binary.rs index a9aa785d7..13b8fa451 100644 --- a/e2e/rust/src/harness/binary.rs +++ b/e2e/rust/src/harness/binary.rs @@ -32,7 +32,8 @@ pub fn openshell_bin() -> PathBuf { let bin = workspace_root().join("target/debug/openshell"); assert!( bin.is_file(), - "openshell binary not found at {bin:?} — run `cargo build -p openshell-cli` first" + "openshell binary not found at {} — run `cargo build -p openshell-cli` first", + bin.display() ); bin } diff --git a/e2e/rust/src/harness/sandbox.rs b/e2e/rust/src/harness/sandbox.rs index 9141f8e2f..278c16f53 100644 --- a/e2e/rust/src/harness/sandbox.rs +++ b/e2e/rust/src/harness/sandbox.rs @@ -153,10 +153,10 @@ impl SandboxGuard { accumulated.push('\n'); // Try to extract the sandbox name from the header. 
- if name.is_none() { - if let Some(n) = extract_sandbox_name(&accumulated) { - name = Some(n); - } + if name.is_none() + && let Some(n) = extract_sandbox_name(&accumulated) + { + name = Some(n); } // Check for the ready marker. diff --git a/e2e/rust/tests/cf_auth_smoke.rs b/e2e/rust/tests/cf_auth_smoke.rs index 43a2f96c5..aa46604fb 100644 --- a/e2e/rust/tests/cf_auth_smoke.rs +++ b/e2e/rust/tests/cf_auth_smoke.rs @@ -147,7 +147,7 @@ async fn gateway_login_help_is_recognized() { // ------------------------------------------------------------------- /// `openshell gateway add ` (cloud gateway) should: -/// - Create cluster metadata with auth_mode = "cloudflare_jwt" +/// - Create cluster metadata with `auth_mode` = `"cloudflare_jwt"` /// - Set the gateway as active /// - Attempt browser authentication (which will fail in CI — non-fatal) #[tokio::test] diff --git a/e2e/rust/tests/docker_preflight.rs b/e2e/rust/tests/docker_preflight.rs index 0b51666e0..d6e125b4b 100644 --- a/e2e/rust/tests/docker_preflight.rs +++ b/e2e/rust/tests/docker_preflight.rs @@ -86,7 +86,7 @@ async fn gateway_start_error_mentions_docker() { } /// When Docker is unavailable, the error output should include guidance -/// about DOCKER_HOST since that's the likely fix for non-default runtimes. +/// about `DOCKER_HOST` since that's the likely fix for non-default runtimes. 
#[tokio::test] async fn gateway_start_error_mentions_docker_host() { let (output, code, _) = run_without_docker(&["gateway", "start"]).await; diff --git a/e2e/rust/tests/sandbox_labels.rs b/e2e/rust/tests/sandbox_labels.rs index 5b682ed26..018a27615 100644 --- a/e2e/rust/tests/sandbox_labels.rs +++ b/e2e/rust/tests/sandbox_labels.rs @@ -115,6 +115,7 @@ async fn delete_sandbox(name: &str) { } #[tokio::test] +#[allow(clippy::too_many_lines)] // end-to-end test exercises full label lifecycle async fn sandbox_labels_are_stored_and_filterable() { // Create sandboxes with different labels let name1 = create_sandbox_with_labels( @@ -195,7 +196,7 @@ async fn sandbox_labels_are_stored_and_filterable() { assert_eq!( dev_backend_sandboxes .iter() - .filter(|name| [&name1, &name2, &name3, &name4].contains(&name)) + .filter(|name| [&name1, &name2, &name3, &name4].contains(name)) .count(), 1, "env=dev,team=backend filter should return exactly 1 sandbox, got: {dev_backend_sandboxes:?}" diff --git a/tasks/rust.toml b/tasks/rust.toml index 69214ce7f..2972957fc 100644 --- a/tasks/rust.toml +++ b/tasks/rust.toml @@ -9,8 +9,11 @@ run = "cargo check --workspace" hide = true ["rust:lint"] -description = "Lint Rust code with Clippy" -run = "cargo clippy --workspace --all-targets" +description = "Lint Rust code with Clippy (deny warnings)" +run = [ + "cargo clippy --workspace --all-targets -- -D warnings", + "cargo clippy --manifest-path e2e/rust/Cargo.toml --all-targets -- -D warnings", +] hide = true ["rust:format"] From ea8e8715dfa35a2ab8e08a18b4f43ad632b20fae Mon Sep 17 00:00:00 2001 From: Drew Newberry Date: Tue, 28 Apr 2026 18:16:47 -0700 Subject: [PATCH 2/4] fix(clippy): address rust 1.95 clippy lints CI uses rustc 1.95 which catches lints not present in 1.93. Apply suggested fixes: - map(...).unwrap_or(false) -> is_ok_and(...) - map(f).unwrap_or(default) -> map_or(default, f) - Drop unnecessary trailing commas in macro args - Replace manual is_some_and pattern with .is_ok_and(...) 
- Replace sort_by(|a,b| a.cmp(b)) with sort_by_key - Add module-level allow(result_large_err) on driver gRPC handlers that stream tonic::Status, mirroring openshell-server's grpc/* files. --- crates/openshell-bootstrap/src/docker.rs | 5 +- crates/openshell-cli/src/auth.rs | 3 +- crates/openshell-cli/src/main.rs | 6 +- crates/openshell-cli/src/run.rs | 12 +- crates/openshell-cli/src/ssh.rs | 3 +- crates/openshell-core/src/paths.rs | 4 +- crates/openshell-driver-docker/src/lib.rs | 2 +- .../openshell-driver-kubernetes/src/grpc.rs | 2 + crates/openshell-driver-podman/src/config.rs | 13 +- crates/openshell-driver-podman/src/grpc.rs | 2 + crates/openshell-driver-vm/build.rs | 2 +- crates/openshell-driver-vm/src/driver.rs | 26 +-- .../src/embedded_runtime.rs | 2 +- crates/openshell-driver-vm/src/gpu.rs | 29 ++-- crates/openshell-driver-vm/src/main.rs | 2 + crates/openshell-driver-vm/src/rootfs.rs | 4 +- crates/openshell-driver-vm/src/runtime.rs | 53 ++++--- .../openshell-sandbox/src/bypass_monitor.rs | 8 + .../src/denial_aggregator.rs | 3 +- crates/openshell-sandbox/src/lib.rs | 44 +++-- crates/openshell-sandbox/src/opa.rs | 24 +-- crates/openshell-sandbox/src/process.rs | 2 +- crates/openshell-sandbox/src/procfs.rs | 26 +-- crates/openshell-sandbox/src/proxy.rs | 8 + .../src/sandbox/linux/landlock.rs | 3 +- .../src/sandbox/linux/mod.rs | 150 +++++++++--------- .../src/sandbox/linux/netns.rs | 77 +++++---- .../src/sandbox/linux/seccomp.rs | 23 ++- crates/openshell-sandbox/src/sandbox/mod.rs | 3 + crates/openshell-sandbox/src/ssh.rs | 5 +- crates/openshell-server/src/compute/mod.rs | 8 +- crates/openshell-server/src/grpc/sandbox.rs | 2 +- .../openshell-server/src/persistence/mod.rs | 16 +- crates/openshell-server/src/policy_store.rs | 13 +- crates/openshell-tui/src/app.rs | 118 +++++--------- crates/openshell-tui/src/event.rs | 18 +-- crates/openshell-vfio/src/lib.rs | 3 +- crates/openshell-vm/build.rs | 2 +- crates/openshell-vm/src/embedded.rs | 2 +- 
crates/openshell-vm/src/exec.rs | 43 +++-- crates/openshell-vm/src/lib.rs | 4 +- 41 files changed, 387 insertions(+), 388 deletions(-) diff --git a/crates/openshell-bootstrap/src/docker.rs b/crates/openshell-bootstrap/src/docker.rs index c0a4459a9..0f7129470 100644 --- a/crates/openshell-bootstrap/src/docker.rs +++ b/crates/openshell-bootstrap/src/docker.rs @@ -749,10 +749,7 @@ pub async fn ensure_container( // When OPENSHELL_PUSH_IMAGES is set the entrypoint overrides the baked-in // HelmChart manifest so k3s uses the locally-pushed images with // IfNotPresent pull policy instead of pulling from the remote registry. - let push_mode = std::env::var("OPENSHELL_PUSH_IMAGES") - .ok() - .filter(|v| !v.trim().is_empty()) - .is_some(); + let push_mode = std::env::var("OPENSHELL_PUSH_IMAGES").is_ok_and(|v| !v.trim().is_empty()); let effective_tag = std::env::var("IMAGE_TAG") .ok() .filter(|v| !v.trim().is_empty()) diff --git a/crates/openshell-cli/src/auth.rs b/crates/openshell-cli/src/auth.rs index 509679f33..a5ae991df 100644 --- a/crates/openshell-cli/src/auth.rs +++ b/crates/openshell-cli/src/auth.rs @@ -94,8 +94,7 @@ pub async fn browser_auth_flow(gateway_endpoint: &str) -> Result { // listener, spawns a callback server, and waits the full AUTH_TIMEOUT // (120 s) for a POST that will never arrive. 
let no_browser = std::env::var("OPENSHELL_NO_BROWSER") - .map(|v| v == "1" || v.eq_ignore_ascii_case("true")) - .unwrap_or(false); + .is_ok_and(|v| v == "1" || v.eq_ignore_ascii_case("true")); if no_browser { return Err(miette::miette!( "authentication skipped (OPENSHELL_NO_BROWSER is set).\n\ diff --git a/crates/openshell-cli/src/main.rs b/crates/openshell-cli/src/main.rs index 89940ef5b..57f3dbc84 100644 --- a/crates/openshell-cli/src/main.rs +++ b/crates/openshell-cli/src/main.rs @@ -151,7 +151,7 @@ fn resolve_sandbox_name(name: Option, gateway: &str) -> Result { Specify a sandbox name or connect to one first: nav sandbox connect " ) })?; - eprintln!("{} Using sandbox '{}' (last used)", "→".bold(), last.bold(),); + eprintln!("{} Using sandbox '{}' (last used)", "→".bold(), last.bold()); Ok(last) } @@ -1874,7 +1874,7 @@ async fn main() -> Result<()> { } else { println!("{}", "Gateway Status".cyan().bold()); println!(); - println!(" {} No gateway configured.", "Status:".dimmed(),); + println!(" {} No gateway configured.", "Status:".dimmed()); println!(); println!( "Deploy a gateway with: {}", @@ -1897,7 +1897,7 @@ async fn main() -> Result<()> { eprintln!("→ Found forward on sandbox '{n}'"); n } else { - eprintln!("{} No active forward found for port {port}", "!".yellow(),); + eprintln!("{} No active forward found for port {port}", "!".yellow()); return Ok(()); } } diff --git a/crates/openshell-cli/src/run.rs b/crates/openshell-cli/src/run.rs index eb7ae3122..8f96124aa 100644 --- a/crates/openshell-cli/src/run.rs +++ b/crates/openshell-cli/src/run.rs @@ -429,9 +429,7 @@ const CLUSTER_DEPLOY_LOG_LINES: usize = 15; /// Return the current terminal width, falling back to 80 columns. fn term_width() -> usize { - crossterm::terminal::size() - .map(|(w, _)| w as usize) - .unwrap_or(80) + crossterm::terminal::size().map_or(80, |(w, _)| w as usize) } /// Build a horizontal rule of `─` characters with an optional centered label. 
@@ -446,7 +444,7 @@ fn horizontal_rule(label: Option<&str>, width: usize) -> String { let remaining = width - text_len; let left = remaining / 2; let right = remaining - left; - format!("{}{}{}", "─".repeat(left), text_with_pad, "─".repeat(right),) + format!("{}{}{}", "─".repeat(left), text_with_pad, "─".repeat(right)) } None => "─".repeat(width), } @@ -1182,7 +1180,7 @@ pub async fn gateway_login(name: &str) -> Result<()> { let token = crate::auth::browser_auth_flow(&metadata.gateway_endpoint).await?; openshell_bootstrap::edge_token::store_edge_token(name, &token)?; - eprintln!("{} Authenticated to gateway '{name}'", "✓".green().bold(),); + eprintln!("{} Authenticated to gateway '{name}'", "✓".green().bold()); Ok(()) } @@ -2451,7 +2449,7 @@ pub async fn sandbox_create( ) .await?; } - eprintln!(" {} Files uploaded", "\u{2713}".green().bold(),); + eprintln!(" {} Files uploaded", "\u{2713}".green().bold()); } // If --forward was requested, start the background port forward @@ -4599,7 +4597,7 @@ pub async fn gateway_setting_delete( response.settings_revision ); } else { - println!("{} Global setting {} not found", "!".yellow(), key,); + println!("{} Global setting {} not found", "!".yellow(), key); } Ok(()) } diff --git a/crates/openshell-cli/src/ssh.rs b/crates/openshell-cli/src/ssh.rs index 19dd78389..45e98b359 100644 --- a/crates/openshell-cli/src/ssh.rs +++ b/crates/openshell-cli/src/ssh.rs @@ -512,8 +512,7 @@ fn write_upload_archive(writer: W, source: UploadSource) -> Result<()> let full_path = base_dir.join(file); let archive_path = archive_prefix .as_ref() - .map(|prefix| prefix.join(file)) - .unwrap_or_else(|| PathBuf::from(file)); + .map_or_else(|| PathBuf::from(file), |prefix| prefix.join(file)); if full_path.is_file() { archive .append_path_with_name(&full_path, &archive_path) diff --git a/crates/openshell-core/src/paths.rs b/crates/openshell-core/src/paths.rs index fd0a141b3..00104f3c2 100644 --- a/crates/openshell-core/src/paths.rs +++ 
b/crates/openshell-core/src/paths.rs @@ -105,9 +105,7 @@ pub fn ensure_parent_dir_restricted(path: &Path) -> Result<()> { #[cfg(unix)] pub fn is_file_permissions_too_open(path: &Path) -> bool { use std::os::unix::fs::PermissionsExt; - std::fs::metadata(path) - .map(|m| m.permissions().mode() & 0o077 != 0) - .unwrap_or(false) + std::fs::metadata(path).is_ok_and(|m| m.permissions().mode() & 0o077 != 0) } #[cfg(test)] diff --git a/crates/openshell-driver-docker/src/lib.rs b/crates/openshell-driver-docker/src/lib.rs index d9683d416..525cb2c74 100644 --- a/crates/openshell-driver-docker/src/lib.rs +++ b/crates/openshell-driver-docker/src/lib.rs @@ -1607,7 +1607,7 @@ fn write_cache_binary_atomic(final_path: &Path, bytes: &[u8]) -> CoreResult<()> )) })?; temp.as_file().sync_all().map_err(|err| { - Error::config(format!("failed to sync supervisor binary temp file: {err}",)) + Error::config(format!("failed to sync supervisor binary temp file: {err}")) })?; #[cfg(unix)] diff --git a/crates/openshell-driver-kubernetes/src/grpc.rs b/crates/openshell-driver-kubernetes/src/grpc.rs index 75e131d41..51488f694 100644 --- a/crates/openshell-driver-kubernetes/src/grpc.rs +++ b/crates/openshell-driver-kubernetes/src/grpc.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::result_large_err)] // gRPC handlers return Result<_, tonic::Status> + use futures::{Stream, StreamExt}; use openshell_core::proto::compute::v1::{ CreateSandboxRequest, CreateSandboxResponse, DeleteSandboxRequest, DeleteSandboxResponse, diff --git a/crates/openshell-driver-podman/src/config.rs b/crates/openshell-driver-podman/src/config.rs index e90eae3e7..1586002ab 100644 --- a/crates/openshell-driver-podman/src/config.rs +++ b/crates/openshell-driver-podman/src/config.rs @@ -121,12 +121,13 @@ impl PodmanComputeConfig { } #[cfg(target_os = "linux")] { - if let Ok(xdg) = std::env::var("XDG_RUNTIME_DIR") { - PathBuf::from(xdg).join("podman/podman.sock") - } else { - let uid = nix::unistd::getuid(); - PathBuf::from(format!("/run/user/{uid}/podman/podman.sock")) - } + std::env::var("XDG_RUNTIME_DIR").map_or_else( + |_| { + let uid = nix::unistd::getuid(); + PathBuf::from(format!("/run/user/{uid}/podman/podman.sock")) + }, + |xdg| PathBuf::from(xdg).join("podman/podman.sock"), + ) } } } diff --git a/crates/openshell-driver-podman/src/grpc.rs b/crates/openshell-driver-podman/src/grpc.rs index 2b413b5e4..df4c90d13 100644 --- a/crates/openshell-driver-podman/src/grpc.rs +++ b/crates/openshell-driver-podman/src/grpc.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::result_large_err)] // gRPC handlers return Result<_, tonic::Status> + use futures::{Stream, StreamExt}; use openshell_core::proto::compute::v1::{ CreateSandboxRequest, CreateSandboxResponse, DeleteSandboxRequest, DeleteSandboxResponse, diff --git a/crates/openshell-driver-vm/build.rs b/crates/openshell-driver-vm/build.rs index 6b9800ef6..e10a1dde0 100644 --- a/crates/openshell-driver-vm/build.rs +++ b/crates/openshell-driver-vm/build.rs @@ -126,7 +126,7 @@ fn main() { e ) }); - let size = fs::metadata(&dst_path).map(|m| m.len()).unwrap_or(0); + let size = fs::metadata(&dst_path).map_or(0, |m| m.len()); println!("cargo:warning=Embedded {src_name}: {size} bytes"); } diff --git a/crates/openshell-driver-vm/src/driver.rs b/crates/openshell-driver-vm/src/driver.rs index a0c69cef6..704f91610 100644 --- a/crates/openshell-driver-vm/src/driver.rs +++ b/crates/openshell-driver-vm/src/driver.rs @@ -277,6 +277,9 @@ impl VmDriver { validate_vm_sandbox(sandbox, self.config.gpu_enabled) } + // `tonic::Status` is large but is the standard error type across the + // gRPC API surface; boxing here would diverge from every other handler. + #[allow(clippy::result_large_err)] pub async fn create_sandbox(&self, sandbox: &Sandbox) -> Result { validate_vm_sandbox(sandbox, self.config.gpu_enabled)?; @@ -327,7 +330,7 @@ impl VmDriver { .map_err(|e| Status::internal(format!("GPU inventory lock poisoned: {e}"))) .and_then(|mut inv| { inv.assign(&sandbox.id, gpu_device) - .map_err(|e| Status::failed_precondition(e)) + .map_err(Status::failed_precondition) }) { Ok(assignment) => { tracing::info!( @@ -361,7 +364,7 @@ impl VmDriver { // Compute the endpoint override before building the env so // there is a single OPENSHELL_ENDPOINT value in the env list. 
- let endpoint_override = if gpu_bdf.is_some() { + let endpoint_override = if let Some(bdf) = gpu_bdf.as_ref() { let subnet = match self .subnet_allocator .lock() @@ -369,7 +372,7 @@ impl VmDriver { .and_then(|mut alloc| { alloc .allocate(&sandbox.id) - .map_err(|e| Status::failed_precondition(e)) + .map_err(Status::failed_precondition) }) { Ok(s) => s, Err(err) => { @@ -398,7 +401,7 @@ impl VmDriver { command .arg("--vm-mem-mib") .arg(self.config.gpu_mem_mib.to_string()); - command.arg("--vm-gpu-bdf").arg(gpu_bdf.as_ref().unwrap()); + command.arg("--vm-gpu-bdf").arg(bdf); command.arg("--vm-tap-device").arg(&tap); command .arg("--vm-guest-ip") @@ -574,10 +577,10 @@ impl VmDriver { } fn release_gpu_and_subnet(&self, sandbox_id: &str) { - if let Some(ref inventory) = self.gpu_inventory { - if let Ok(mut inv) = inventory.lock() { - inv.release(sandbox_id); - } + if let Some(inventory) = self.gpu_inventory.as_ref() + && let Ok(mut inv) = inventory.lock() + { + inv.release(sandbox_id); } if let Ok(mut alloc) = self.subnet_allocator.lock() { alloc.release(sandbox_id); @@ -963,9 +966,10 @@ fn build_guest_environment( config: &VmDriverConfig, endpoint_override: Option<&str>, ) -> Vec { - let openshell_endpoint = endpoint_override - .map(String::from) - .unwrap_or_else(|| guest_visible_openshell_endpoint(&config.openshell_endpoint)); + let openshell_endpoint = endpoint_override.map_or_else( + || guest_visible_openshell_endpoint(&config.openshell_endpoint), + String::from, + ); let mut environment = HashMap::from([ ("HOME".to_string(), "/root".to_string()), ( diff --git a/crates/openshell-driver-vm/src/embedded_runtime.rs b/crates/openshell-driver-vm/src/embedded_runtime.rs index a59c80b45..70626edd9 100644 --- a/crates/openshell-driver-vm/src/embedded_runtime.rs +++ b/crates/openshell-driver-vm/src/embedded_runtime.rs @@ -115,7 +115,7 @@ pub fn validate_runtime_dir(dir: &Path) -> Result<(), String> { if !path.is_file() { return Err(format!("missing runtime file: {}", 
path.display())); } - let size = fs::metadata(path).map(|m| m.len()).unwrap_or(0); + let size = fs::metadata(path).map_or(0, |m| m.len()); if size == 0 { return Err(format!("runtime file is empty (stub): {}", path.display())); } diff --git a/crates/openshell-driver-vm/src/gpu.rs b/crates/openshell-driver-vm/src/gpu.rs index 9089a166b..dc5883b5b 100644 --- a/crates/openshell-driver-vm/src/gpu.rs +++ b/crates/openshell-driver-vm/src/gpu.rs @@ -50,14 +50,17 @@ impl GpuInventory { } pub fn gpu_count(&self) -> u32 { - self.slots.len() as u32 + u32::try_from(self.slots.len()).unwrap_or(u32::MAX) } pub fn available_count(&self) -> u32 { - self.slots - .iter() - .filter(|s| s.assigned_to.is_none()) - .count() as u32 + u32::try_from( + self.slots + .iter() + .filter(|s| s.assigned_to.is_none()) + .count(), + ) + .unwrap_or(u32::MAX) } /// Assign a GPU to a sandbox. Returns the assignment details including BDF. @@ -136,7 +139,7 @@ impl GpuInventory { sandbox_id: id.clone(), bound_at_ms: std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) - .map_or(0, |d| d.as_millis() as i64), + .map_or(0, |d| i64::try_from(d.as_millis()).unwrap_or(i64::MAX)), }) }) .collect(); @@ -188,7 +191,7 @@ impl SubnetAllocator { let pool_size = 1u32 << (32 - self.prefix_len); let max_subnets = pool_size / 4; - if self.allocated.len() as u32 >= max_subnets { + if u32::try_from(self.allocated.len()).unwrap_or(u32::MAX) >= max_subnets { return Err("subnet pool exhausted".to_string()); } @@ -235,10 +238,10 @@ pub fn allocate_vsock_cid() -> u32 { /// Generate a locally-administered MAC from sandbox ID using FNV-1a. 
pub fn mac_from_sandbox_id(sandbox_id: &str) -> [u8; 6] { - let mut hash: u64 = 0xcbf29ce484222325; + let mut hash: u64 = 0xcbf2_9ce4_8422_2325; for byte in sandbox_id.as_bytes() { hash ^= u64::from(*byte); - hash = hash.wrapping_mul(0x100000001b3); + hash = hash.wrapping_mul(0x0000_0100_0000_01b3); } let bytes = hash.to_le_bytes(); let mut mac = [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5]]; @@ -248,8 +251,12 @@ pub fn mac_from_sandbox_id(sandbox_id: &str) -> [u8; 6] { /// TAP device name from sandbox ID (fits `IFNAMSIZ=16`). pub fn tap_device_name(sandbox_id: &str) -> String { - let end = sandbox_id.len().min(8); - let end = sandbox_id.floor_char_boundary(end); + let mut end = sandbox_id.len().min(8); + // Walk back to a UTF-8 char boundary (str::floor_char_boundary requires + // Rust 1.91 — we still build on older toolchains). + while end > 0 && !sandbox_id.is_char_boundary(end) { + end -= 1; + } let prefix = &sandbox_id[..end]; format!("vmtap-{prefix}") } diff --git a/crates/openshell-driver-vm/src/main.rs b/crates/openshell-driver-vm/src/main.rs index 94169a61f..ca1842596 100644 --- a/crates/openshell-driver-vm/src/main.rs +++ b/crates/openshell-driver-vm/src/main.rs @@ -301,6 +301,8 @@ fn maybe_reexec_internal_vm_with_runtime_env() -> Result<()> { } #[cfg(not(target_os = "macos"))] +// Signature must match the macOS variant which can fail. 
+#[allow(clippy::unnecessary_wraps)] fn maybe_reexec_internal_vm_with_runtime_env() -> Result<()> { Ok(()) } diff --git a/crates/openshell-driver-vm/src/rootfs.rs b/crates/openshell-driver-vm/src/rootfs.rs index ef3e3409d..7c7fa19f4 100644 --- a/crates/openshell-driver-vm/src/rootfs.rs +++ b/crates/openshell-driver-vm/src/rootfs.rs @@ -41,9 +41,7 @@ fn extract_variant(blob: &[u8], variant: &str, empty_msg: &str, dest: &Path) -> let marker_path = dest.join(ROOTFS_VARIANT_MARKER); if dest.is_dir() - && fs::read_to_string(&marker_path) - .map(|value| value.trim() == expected_marker) - .unwrap_or(false) + && fs::read_to_string(&marker_path).is_ok_and(|value| value.trim() == expected_marker) { return Ok(()); } diff --git a/crates/openshell-driver-vm/src/runtime.rs b/crates/openshell-driver-vm/src/runtime.rs index 09576bee0..c063da10b 100644 --- a/crates/openshell-driver-vm/src/runtime.rs +++ b/crates/openshell-driver-vm/src/runtime.rs @@ -24,7 +24,7 @@ static CHILD_PID: AtomicI32 = AtomicI32::new(0); /// launcher (especially on macOS where `PR_SET_PDEATHSIG` is absent). 
static GVPROXY_PID: AtomicI32 = AtomicI32::new(0); -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum VmBackend { Libkrun, Qemu, @@ -168,7 +168,7 @@ fn run_qemu_vm(config: &VmLaunchConfig) -> Result<(), String> { let virtiofsd_child = virtiofsd_cmd .spawn() .map_err(|e| format!("failed to start virtiofsd: {e}"))?; - let virtiofsd_pid = virtiofsd_child.id() as i32; + let virtiofsd_pid = virtiofsd_child.id().cast_signed(); GVPROXY_PID.store(virtiofsd_pid, Ordering::Relaxed); let mut virtiofsd_guard = GvproxyGuard::new(virtiofsd_child); @@ -254,7 +254,7 @@ fn run_qemu_vm(config: &VmLaunchConfig) -> Result<(), String> { .spawn() .map_err(|e| format!("failed to start QEMU: {e}"))?; - let qemu_pid = qemu_child.id() as i32; + let qemu_pid = qemu_child.id().cast_signed(); install_signal_forwarding(qemu_pid); let status = qemu_child @@ -288,7 +288,8 @@ fn write_guest_env_file(rootfs: &Path, env_vars: &[String]) -> Result<(), String let mut content = String::new(); for var in env_vars { if let Some((key, value)) = var.split_once('=') { - content.push_str(&format!("export {key}=\"{}\"\n", shell_escape(value))); + use std::fmt::Write as _; + let _ = writeln!(content, "export {key}=\"{}\"", shell_escape(value)); } } std::fs::write(&env_file, &content).map_err(|e| format!("write guest env file: {e}"))?; @@ -315,12 +316,12 @@ fn build_kernel_cmdline(config: &VmLaunchConfig) -> String { format!("init={}", config.exec_path), ]; - if let Some(ip) = &config.guest_ip { - if let Some(host_ip) = &config.host_ip { - parts.push(format!("ip={ip}::{host_ip}:255.255.255.252:sandbox::off")); - parts.push(format!("VM_NET_IP={ip}")); - parts.push(format!("VM_NET_GW={host_ip}")); - } + if let Some(ip) = &config.guest_ip + && let Some(host_ip) = &config.host_ip + { + parts.push(format!("ip={ip}::{host_ip}:255.255.255.252:sandbox::off")); + parts.push(format!("VM_NET_IP={ip}")); + parts.push(format!("VM_NET_GW={host_ip}")); } if let Some(dns) = host_dns_server() { 
@@ -358,10 +359,11 @@ fn host_dns_server() -> Option { None } -/// Remove leftover `vmtap-*` interfaces from previous driver runs that -/// were not torn down (e.g. the launcher was SIGKILLed before teardown). -/// Called once at driver startup so stale interfaces cannot cause subnet -/// routing conflicts with newly allocated TAPs. +/// Remove leftover `vmtap-*` interfaces from previous driver runs. +/// +/// Called once at driver startup for interfaces that were not torn down +/// (e.g. the launcher was `SIGKILL`-ed before teardown), so stale +/// interfaces cannot cause subnet routing conflicts with newly allocated TAPs. pub fn cleanup_stale_tap_interfaces() { let Ok(entries) = std::fs::read_dir("/sys/class/net") else { return; @@ -400,10 +402,10 @@ fn read_tap_host_ip(device: &str) -> Option { let stdout = String::from_utf8_lossy(&output.stdout); // Format: "28: vmtap-xxx inet 10.0.128.1/30 ..." for token in stdout.split_whitespace() { - if let Some((ip, _prefix)) = token.split_once('/') { - if ip.parse::().is_ok() { - return Some(ip.to_string()); - } + if let Some((ip, _prefix)) = token.split_once('/') + && ip.parse::().is_ok() + { + return Some(ip.to_string()); } } None @@ -566,13 +568,14 @@ fn teardown_tap_networking(tap_device: &str, host_ip: &str, gateway_port: u16) { } fn tap_subnet_from_host_ip(host_ip: &str) -> String { - if let Ok(ip) = host_ip.parse::() { - let base = u32::from(ip) & !3; - let base_ip = std::net::Ipv4Addr::from(base); - format!("{base_ip}/30") - } else { - format!("{host_ip}/30") - } + host_ip.parse::().map_or_else( + |_| format!("{host_ip}/30"), + |ip| { + let base = u32::from(ip) & !3; + let base_ip = std::net::Ipv4Addr::from(base); + format!("{base_ip}/30") + }, + ) } fn enable_ip_forwarding() -> Result<(), String> { diff --git a/crates/openshell-sandbox/src/bypass_monitor.rs b/crates/openshell-sandbox/src/bypass_monitor.rs index 3cef9e4d6..1a7ec5f99 100644 --- a/crates/openshell-sandbox/src/bypass_monitor.rs +++ 
b/crates/openshell-sandbox/src/bypass_monitor.rs @@ -512,6 +512,8 @@ mod tests { let (_accepted, _) = listener.accept().expect("accept"); let fd = stream.as_raw_fd(); + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { let flags = libc::fcntl(fd, libc::F_GETFD); assert!(flags >= 0, "F_GETFD failed"); @@ -525,9 +527,13 @@ mod tests { let sleep_path = CString::new("/bin/sleep").unwrap(); let arg0 = CString::new("sleep").unwrap(); let arg1 = CString::new("30").unwrap(); + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] let child_pid = unsafe { libc::fork() }; assert!(child_pid >= 0, "fork failed"); if child_pid == 0 { + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { libc::execl( sleep_path.as_ptr(), @@ -555,6 +561,8 @@ mod tests { let (binary, pid, ancestors) = resolve_process_identity(std::process::id(), peer_port); + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { libc::kill(child_pid, libc::SIGKILL); libc::waitpid(child_pid, std::ptr::null_mut(), 0); diff --git a/crates/openshell-sandbox/src/denial_aggregator.rs b/crates/openshell-sandbox/src/denial_aggregator.rs index 175f31afc..d64be7f1d 100644 --- a/crates/openshell-sandbox/src/denial_aggregator.rs +++ b/crates/openshell-sandbox/src/denial_aggregator.rs @@ -221,6 +221,5 @@ pub struct FlushableL7Sample { fn current_time_ms() -> i64 { SystemTime::now() .duration_since(UNIX_EPOCH) - .map(|d| i64::try_from(d.as_millis()).unwrap_or(i64::MAX)) - .unwrap_or(0) + .map_or(0, |d| i64::try_from(d.as_millis()).unwrap_or(i64::MAX)) } diff --git a/crates/openshell-sandbox/src/lib.rs b/crates/openshell-sandbox/src/lib.rs index f4e87fffa..fbd7460e2 100644 --- a/crates/openshell-sandbox/src/lib.rs +++ b/crates/openshell-sandbox/src/lib.rs @@ -26,12 +26,14 @@ mod supervisor_session; use miette::{IntoDiagnostic, Result}; #[cfg(target_os = "linux")] use std::collections::HashSet; +use std::future::Future; use std::net::SocketAddr; use std::sync::Arc; +use 
std::sync::LazyLock; +#[cfg(any(target_os = "linux", test))] +use std::sync::Mutex; use std::sync::OnceLock; use std::sync::atomic::{AtomicU32, Ordering}; -#[cfg(target_os = "linux")] -use std::sync::{LazyLock, Mutex}; use std::time::Duration; use tokio::time::timeout; use tracing::{debug, info, trace, warn}; @@ -67,16 +69,15 @@ static OCSF_CTX: OnceLock = OnceLock::new(); /// Fallback context used when `OCSF_CTX` has not been initialized (e.g. in /// unit tests that exercise individual functions without calling `run_sandbox`). -static OCSF_CTX_FALLBACK: std::sync::LazyLock = - std::sync::LazyLock::new(|| SandboxContext { - sandbox_id: String::new(), - sandbox_name: String::new(), - container_image: String::new(), - hostname: "test".to_string(), - product_version: openshell_core::VERSION.to_string(), - proxy_ip: std::net::IpAddr::from([127, 0, 0, 1]), - proxy_port: 3128, - }); +static OCSF_CTX_FALLBACK: LazyLock = LazyLock::new(|| SandboxContext { + sandbox_id: String::new(), + sandbox_name: String::new(), + container_image: String::new(), + hostname: "test".to_string(), + product_version: openshell_core::VERSION.to_string(), + proxy_ip: std::net::IpAddr::from([127, 0, 0, 1]), + proxy_port: 3128, +}); /// Return a reference to the process-wide [`SandboxContext`]. /// @@ -492,15 +493,13 @@ pub async fn run_sandbox( // Reads /dev/kmsg for iptables LOG entries and emits structured // tracing events for direct connection attempts that bypass the proxy. #[cfg(target_os = "linux")] - let _bypass_monitor = if netns.is_some() { + let _bypass_monitor = netns.as_ref().and_then(|ns| { bypass_monitor::spawn( - netns.as_ref().expect("netns is Some").name().to_string(), + ns.name().to_string(), entrypoint_pid.clone(), bypass_denial_tx, ) - } else { - None - }; + }); // On non-Linux, bypass_denial_tx is unused (no /dev/kmsg). 
#[cfg(not(target_os = "linux"))] @@ -597,11 +596,11 @@ pub async fn run_sandbox( } match waitpid(pid, Some(WaitPidFlag::WNOHANG)) { - Ok(WaitStatus::StillAlive) | Err(nix::errno::Errno::ECHILD) => {} + Ok(WaitStatus::StillAlive) + | Err(nix::errno::Errno::ECHILD | nix::errno::Errno::EINTR) => {} Ok(reaped) => { tracing::debug!(?reaped, "Reaped orphaned child process"); } - Err(nix::errno::Errno::EINTR) => {} Err(e) => { tracing::debug!(error = %e, "waitpid error during orphan reap"); break; @@ -1608,10 +1607,10 @@ fn is_retryable_error(err: &miette::Report) -> bool { /// /// Non-transient gRPC errors (e.g. `NOT_FOUND`, `INVALID_ARGUMENT`, /// `PERMISSION_DENIED`) are returned immediately without retrying. -async fn grpc_retry(op_name: &str, f: F) -> miette::Result +async fn grpc_retry(op_name: &str, f: F) -> Result where F: Fn() -> Fut, - Fut: std::future::Future>, + Fut: Future>, { let mut last_err = None; for attempt in 1..=5u32 { @@ -2352,8 +2351,7 @@ mod tests { use std::os::unix::fs::{MetadataExt, symlink}; use temp_env::with_vars; - static ENV_LOCK: std::sync::LazyLock> = - std::sync::LazyLock::new(|| std::sync::Mutex::new(())); + static ENV_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); #[test] fn bundle_to_resolved_routes_converts_all_fields() { diff --git a/crates/openshell-sandbox/src/opa.rs b/crates/openshell-sandbox/src/opa.rs index 1e2860d00..841f20ce3 100644 --- a/crates/openshell-sandbox/src/opa.rs +++ b/crates/openshell-sandbox/src/opa.rs @@ -638,7 +638,7 @@ fn normalize_endpoint_ports(data: &mut serde_json::Value) { /// /// Normalize a path by resolving `.` and `..` components without touching /// the filesystem. Only works correctly for absolute paths. 
-#[cfg(test)] +#[cfg(any(target_os = "linux", test))] fn normalize_path(path: &Path) -> PathBuf { let mut result = PathBuf::new(); for component in path.components() { @@ -664,7 +664,7 @@ fn resolve_binary_in_container(policy_path: &str, entrypoint_pid: u32) -> Option // /proc//root itself (a kernel pseudo-symlink to /) which // strips the prefix we need. read_link only reads the target of // the specified symlink, keeping us in the container's namespace. - let mut resolved = std::path::PathBuf::from(policy_path); + let mut resolved = PathBuf::from(policy_path); // Linux SYMLOOP_MAX is 40; stop before infinite loops for _ in 0..40 { @@ -3680,9 +3680,8 @@ network_policies: #[cfg(target_os = "linux")] fn procfs_root_accessible() -> bool { use std::os::unix::fs::symlink; - let dir = match tempfile::tempdir() { - Ok(d) => d, - Err(_) => return false, + let Ok(dir) = tempfile::tempdir() else { + return false; }; let target = dir.path().join("probe_target"); let link = dir.path().join("probe_link"); @@ -3701,6 +3700,8 @@ network_policies: #[cfg(target_os = "linux")] #[test] fn resolve_binary_with_real_symlink() { + use std::os::unix::fs::symlink; + if !procfs_root_accessible() { eprintln!("Skipping: /proc//root/ not accessible in this environment"); return; @@ -3708,7 +3709,6 @@ network_policies: // Create a real symlink in a temp directory and verify resolution // works through /proc/self/root (which maps to / on the host) - use std::os::unix::fs::symlink; let dir = tempfile::tempdir().unwrap(); let target = dir.path().join("python3.11"); let link = dir.path().join("python3"); @@ -3737,13 +3737,14 @@ network_policies: #[cfg(target_os = "linux")] #[test] fn resolve_binary_non_symlink_returns_none() { + use std::io::Write; + if !procfs_root_accessible() { eprintln!("Skipping: /proc//root/ not accessible in this environment"); return; } // A regular file should return None (no expansion needed) - use std::io::Write; let mut tmp = tempfile::NamedTempFile::new().unwrap(); 
tmp.write_all(b"regular file").unwrap(); tmp.flush().unwrap(); @@ -3761,13 +3762,14 @@ network_policies: #[cfg(target_os = "linux")] #[test] fn resolve_binary_multi_level_symlink() { + use std::os::unix::fs::symlink; + if !procfs_root_accessible() { eprintln!("Skipping: /proc//root/ not accessible in this environment"); return; } // Test multi-level symlink resolution: python3 -> python3.11 -> cpython3.11 - use std::os::unix::fs::symlink; let dir = tempfile::tempdir().unwrap(); let final_target = dir.path().join("cpython3.11"); let mid_link = dir.path().join("python3.11"); @@ -3792,6 +3794,8 @@ network_policies: #[cfg(target_os = "linux")] #[test] fn from_proto_with_pid_expands_symlinks_in_container() { + use std::os::unix::fs::symlink; + if !procfs_root_accessible() { eprintln!("Skipping: /proc//root/ not accessible in this environment"); return; @@ -3799,7 +3803,6 @@ network_policies: // End-to-end test: create a symlink, build engine with our PID, // verify the resolved path is allowed - use std::os::unix::fs::symlink; let dir = tempfile::tempdir().unwrap(); let target = dir.path().join("node22"); let link = dir.path().join("node"); @@ -3868,6 +3871,8 @@ network_policies: #[cfg(target_os = "linux")] #[test] fn reload_from_proto_with_pid_resolves_symlinks() { + use std::os::unix::fs::symlink; + if !procfs_root_accessible() { eprintln!("Skipping: /proc//root/ not accessible in this environment"); return; @@ -3875,7 +3880,6 @@ network_policies: // Test hot-reload path: initial engine at pid=0, then reload with // real PID to trigger symlink resolution - use std::os::unix::fs::symlink; let dir = tempfile::tempdir().unwrap(); let target = dir.path().join("python3.11"); let link = dir.path().join("python3"); diff --git a/crates/openshell-sandbox/src/process.rs b/crates/openshell-sandbox/src/process.rs index b491b19ff..d13a99a45 100644 --- a/crates/openshell-sandbox/src/process.rs +++ b/crates/openshell-sandbox/src/process.rs @@ -790,7 +790,7 @@ mod tests { 
#[cfg(target_os = "linux")] #[allow(unsafe_code)] unsafe fn dumpable_flag_probe() -> i64 { - unsafe { libc::prctl(libc::PR_GET_DUMPABLE, 0, 0, 0, 0) as i64 } + unsafe { i64::from(libc::prctl(libc::PR_GET_DUMPABLE, 0, 0, 0, 0)) } } #[test] diff --git a/crates/openshell-sandbox/src/procfs.rs b/crates/openshell-sandbox/src/procfs.rs index f831615c1..988ee2412 100644 --- a/crates/openshell-sandbox/src/procfs.rs +++ b/crates/openshell-sandbox/src/procfs.rs @@ -162,7 +162,7 @@ fn resolve_single_tcp_peer_owner(entrypoint_pid: u32, peer_port: u16) -> Result< /// Like `resolve_tcp_peer_binary`, but also returns the PID that owns the socket. /// -/// Needed for the ancestor walk: we must know the PID to walk `/proc//status` PPid chain. +/// Needed for the ancestor walk: we must know the PID to walk `/proc//status` `PPid` chain. #[cfg(target_os = "linux")] pub fn resolve_tcp_peer_identity(entrypoint_pid: u32, peer_port: u16) -> Result<(PathBuf, u32)> { let owner = resolve_single_tcp_peer_owner(entrypoint_pid, peer_port)?; @@ -423,7 +423,7 @@ fn check_pid_fds(pid: u32, target: &str) -> bool { /// /// Performs a BFS walk of the process tree. If `/proc//task//children` /// is not available (requires `CONFIG_PROC_CHILDREN`), returns only the root PID. -#[cfg(target_os = "linux")] +#[cfg(all(test, target_os = "linux"))] fn collect_descendant_pids(root_pid: u32) -> Vec { collect_descendant_pids_with_depth(root_pid) .into_iter() @@ -514,7 +514,7 @@ mod tests { /// binary for a brief window. Byte-level `starts_with` tolerates the kernel's /// `" (deleted)"` suffix on unlinked executables. 
#[cfg(target_os = "linux")] - fn wait_for_child_exec(pid: i32, target: &std::path::Path) { + fn wait_for_child_exec(pid: i32, target: &Path) { use std::os::unix::ffi::OsStrExt as _; let target_bytes = target.as_os_str().as_bytes(); let deadline = std::time::Instant::now() + std::time::Duration::from_secs(2); @@ -752,10 +752,7 @@ mod tests { fn collect_descendants_dedupes_pids() { let pid = std::process::id(); let pids = collect_descendant_pids(pid); - let unique = pids - .iter() - .copied() - .collect::>(); + let unique = pids.iter().copied().collect::>(); assert_eq!(pids.len(), unique.len()); } @@ -771,16 +768,20 @@ mod tests { let peer_port = stream.local_addr().unwrap().port(); let (_accepted, _) = listener.accept().expect("accept"); + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] let child_pid = unsafe { libc::fork() }; assert!(child_pid >= 0, "fork failed"); if child_pid == 0 { + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { libc::sleep(30); libc::_exit(0); } } - let child_pid_u32 = child_pid as u32; + let child_pid_u32 = child_pid.cast_unsigned(); let entrypoint_pid = std::process::id(); let deadline = Instant::now() + Duration::from_secs(2); let owners = loop { @@ -790,18 +791,19 @@ mod tests { .owners .iter() .map(|owner| owner.pid) - .collect::>(); + .collect::>(); if owner_pids.contains(&entrypoint_pid) && owner_pids.contains(&child_pid_u32) { break owners; } assert!( Instant::now() < deadline, - "timed out waiting for forked child to appear as a socket owner; got {:?}", - owner_pids + "timed out waiting for forked child to appear as a socket owner; got {owner_pids:?}" ); std::thread::sleep(Duration::from_millis(20)); }; + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { libc::kill(child_pid, libc::SIGKILL); libc::waitpid(child_pid, std::ptr::null_mut(), 0); @@ -811,7 +813,7 @@ mod tests { .owners .iter() .map(|owner| owner.pid) - .collect::>(); + .collect::>(); 
assert!(owner_pids.contains(&entrypoint_pid)); assert!(owner_pids.contains(&child_pid_u32)); } diff --git a/crates/openshell-sandbox/src/proxy.rs b/crates/openshell-sandbox/src/proxy.rs index f8df885d5..861905a6b 100644 --- a/crates/openshell-sandbox/src/proxy.rs +++ b/crates/openshell-sandbox/src/proxy.rs @@ -4394,6 +4394,8 @@ mod tests { let (_accepted, _) = listener.accept().expect("accept"); let fd = stream.as_raw_fd(); + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { let flags = libc::fcntl(fd, libc::F_GETFD); assert!(flags >= 0, "F_GETFD failed"); @@ -4407,9 +4409,13 @@ mod tests { let sleep_path = CString::new("/bin/sleep").unwrap(); let arg0 = CString::new("sleep").unwrap(); let arg1 = CString::new("30").unwrap(); + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] let child_pid = unsafe { libc::fork() }; assert!(child_pid >= 0, "fork failed"); if child_pid == 0 { + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { libc::execl( sleep_path.as_ptr(), @@ -4438,6 +4444,8 @@ mod tests { let cache = BinaryIdentityCache::new(); let result = resolve_process_identity(std::process::id(), peer_port, &cache); + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] unsafe { libc::kill(child_pid, libc::SIGKILL); libc::waitpid(child_pid, std::ptr::null_mut(), 0); diff --git a/crates/openshell-sandbox/src/sandbox/linux/landlock.rs b/crates/openshell-sandbox/src/sandbox/linux/landlock.rs index b982c5238..214fc700a 100644 --- a/crates/openshell-sandbox/src/sandbox/linux/landlock.rs +++ b/crates/openshell-sandbox/src/sandbox/linux/landlock.rs @@ -95,7 +95,7 @@ pub struct PreparedRuleset { compatibility: LandlockCompatibility, } -/// Phase 1: Open PathFds and build the Landlock ruleset **as root**. +/// Phase 1: Open `PathFds` and build the Landlock ruleset **as root**. /// /// This must run before `drop_privileges()` so that `PathFd::new()` can open /// paths that are only accessible to root (e.g. mode 700 directories). 
@@ -272,6 +272,7 @@ pub fn enforce(prepared: PreparedRuleset) -> Result<()> { /// Legacy single-phase apply. Kept for non-Linux platforms and tests. /// On Linux, callers should use [`prepare`] + [`enforce`] for correct /// privilege ordering. +#[allow(dead_code)] // Retained for backward compat; live callers use prepare+enforce. pub fn apply(policy: &SandboxPolicy, workdir: Option<&str>) -> Result<()> { if let Some(prepared) = prepare(policy, workdir)? { enforce(prepared)?; diff --git a/crates/openshell-sandbox/src/sandbox/linux/mod.rs b/crates/openshell-sandbox/src/sandbox/linux/mod.rs index 565ce4f46..848ab1e3b 100644 --- a/crates/openshell-sandbox/src/sandbox/linux/mod.rs +++ b/crates/openshell-sandbox/src/sandbox/linux/mod.rs @@ -13,7 +13,7 @@ use std::path::PathBuf; use std::sync::Once; /// Opaque handle to a prepared-but-not-yet-enforced sandbox. -/// Holds the Landlock ruleset with PathFds opened as root. +/// Holds the Landlock ruleset with `PathFds` opened as root. pub struct PreparedSandbox { landlock: Option, policy: SandboxPolicy, @@ -21,7 +21,7 @@ pub struct PreparedSandbox { /// Phase 1: Prepare sandbox restrictions **as root** (before `drop_privileges`). /// -/// Opens Landlock PathFds while the process still has root privileges, +/// Opens Landlock `PathFds` while the process still has root privileges, /// ensuring paths like mode-700 directories are accessible. pub fn prepare(policy: &SandboxPolicy, workdir: Option<&str>) -> Result { let landlock = landlock::prepare(policy, workdir)?; @@ -50,6 +50,7 @@ pub fn apply_supervisor_prelude() -> Result<()> { /// Legacy single-phase apply. Kept for backward compatibility. /// New callers should use [`prepare`] + [`enforce`] for correct privilege ordering. +#[allow(dead_code)] // Retained for backward compat; live callers use prepare+enforce. 
pub fn apply(policy: &SandboxPolicy, workdir: Option<&str>) -> Result<()> { landlock::apply(policy, workdir)?; seccomp::apply(policy)?; @@ -72,12 +73,12 @@ pub fn log_sandbox_readiness(policy: &SandboxPolicy, workdir: Option<&str>) { let mut read_write = policy.filesystem.read_write.clone(); let read_only = &policy.filesystem.read_only; - if policy.filesystem.include_workdir { - if let Some(dir) = workdir { - let workdir_path = PathBuf::from(dir); - if !read_write.contains(&workdir_path) { - read_write.push(workdir_path); - } + if policy.filesystem.include_workdir + && let Some(dir) = workdir + { + let workdir_path = PathBuf::from(dir); + if !read_write.contains(&workdir_path) { + read_write.push(workdir_path); } } @@ -96,74 +97,71 @@ pub fn log_sandbox_readiness(policy: &SandboxPolicy, workdir: Option<&str>) { } let availability = landlock::probe_availability(); - match &availability { - landlock::LandlockAvailability::Available { abi } => { - openshell_ocsf::ocsf_emit!( - openshell_ocsf::ConfigStateChangeBuilder::new(crate::ocsf_ctx()) - .severity(openshell_ocsf::SeverityId::Informational) - .status(openshell_ocsf::StatusId::Success) - .state(openshell_ocsf::StateId::Enabled, "probed") - .message(format!( - "Landlock filesystem sandbox available \ - [abi:v{abi} compat:{:?} ro:{} rw:{}]", - policy.landlock.compatibility, - read_only.len(), - read_write.len(), - )) - .build() - ); - } - _ => { - // Landlock is NOT available — this is the critical log that was - // previously invisible because it only fired inside pre_exec. - let is_best_effort = matches!( - policy.landlock.compatibility, - crate::policy::LandlockCompatibility::BestEffort - ); - let (desc, msg) = if is_best_effort { - ( - format!( - "Sandbox will run WITHOUT filesystem restrictions: {availability}. \ - Policy requests {total_paths} path rule(s) \ - (ro:{} rw:{}) but Landlock cannot enforce them. 
\ - Set landlock.compatibility to 'hard_requirement' to make this fatal.", - read_only.len(), - read_write.len(), - ), - format!( - "Landlock filesystem sandbox unavailable (best_effort, degraded): {availability}" - ), - ) - } else { - ( - format!( - "Landlock is unavailable: {availability}. \ - Policy requires {total_paths} path rule(s) \ - (ro:{} rw:{}) with hard_requirement — sandbox startup will fail.", - read_only.len(), - read_write.len(), - ), - format!( - "Landlock filesystem sandbox unavailable (hard_requirement, will fail): {availability}" - ), - ) - }; - openshell_ocsf::ocsf_emit!( - openshell_ocsf::DetectionFindingBuilder::new(crate::ocsf_ctx()) - .activity(openshell_ocsf::ActivityId::Open) - .severity(openshell_ocsf::SeverityId::High) - .confidence(openshell_ocsf::ConfidenceId::High) - .is_alert(true) - .finding_info( - openshell_ocsf::FindingInfo::new( - "landlock-unavailable", - "Landlock Filesystem Sandbox Unavailable", - ) - .with_desc(&desc), + if let landlock::LandlockAvailability::Available { abi } = &availability { + openshell_ocsf::ocsf_emit!( + openshell_ocsf::ConfigStateChangeBuilder::new(crate::ocsf_ctx()) + .severity(openshell_ocsf::SeverityId::Informational) + .status(openshell_ocsf::StatusId::Success) + .state(openshell_ocsf::StateId::Enabled, "probed") + .message(format!( + "Landlock filesystem sandbox available \ + [abi:v{abi} compat:{:?} ro:{} rw:{}]", + policy.landlock.compatibility, + read_only.len(), + read_write.len(), + )) + .build() + ); + } else { + // Landlock is NOT available — this is the critical log that was + // previously invisible because it only fired inside pre_exec. + let is_best_effort = matches!( + policy.landlock.compatibility, + crate::policy::LandlockCompatibility::BestEffort + ); + let (desc, msg) = if is_best_effort { + ( + format!( + "Sandbox will run WITHOUT filesystem restrictions: {availability}. \ + Policy requests {total_paths} path rule(s) \ + (ro:{} rw:{}) but Landlock cannot enforce them. 
\ + Set landlock.compatibility to 'hard_requirement' to make this fatal.", + read_only.len(), + read_write.len(), + ), + format!( + "Landlock filesystem sandbox unavailable (best_effort, degraded): {availability}" + ), + ) + } else { + ( + format!( + "Landlock is unavailable: {availability}. \ + Policy requires {total_paths} path rule(s) \ + (ro:{} rw:{}) with hard_requirement — sandbox startup will fail.", + read_only.len(), + read_write.len(), + ), + format!( + "Landlock filesystem sandbox unavailable (hard_requirement, will fail): {availability}" + ), + ) + }; + openshell_ocsf::ocsf_emit!( + openshell_ocsf::DetectionFindingBuilder::new(crate::ocsf_ctx()) + .activity(openshell_ocsf::ActivityId::Open) + .severity(openshell_ocsf::SeverityId::High) + .confidence(openshell_ocsf::ConfidenceId::High) + .is_alert(true) + .finding_info( + openshell_ocsf::FindingInfo::new( + "landlock-unavailable", + "Landlock Filesystem Sandbox Unavailable", ) - .message(msg) - .build() - ); - } + .with_desc(&desc), + ) + .message(msg) + .build() + ); } } diff --git a/crates/openshell-sandbox/src/sandbox/linux/netns.rs b/crates/openshell-sandbox/src/sandbox/linux/netns.rs index bbd02255f..e926335e0 100644 --- a/crates/openshell-sandbox/src/sandbox/linux/netns.rs +++ b/crates/openshell-sandbox/src/sandbox/linux/netns.rs @@ -11,7 +11,7 @@ use miette::{IntoDiagnostic, Result}; use std::net::IpAddr; use std::os::unix::io::RawFd; use std::process::Command; -use tracing::{debug, info, warn}; +use tracing::{debug, warn}; use uuid::Uuid; /// Default subnet for sandbox networking. 
@@ -211,6 +211,8 @@ impl NetworkNamespace { if let Some(fd) = self.ns_fd { debug!(namespace = %self.name, "Entering network namespace via setns"); // SAFETY: setns is safe to call after fork, before exec + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] let result = unsafe { libc::setns(fd, libc::CLONE_NEWNET) }; if result != 0 { return Err(miette::miette!( @@ -235,7 +237,7 @@ impl NetworkNamespace { /// Install iptables rules for bypass detection inside the namespace. /// /// Sets up OUTPUT chain rules that: - /// 1. ACCEPT traffic destined for the proxy (host_ip:proxy_port) + /// 1. ACCEPT traffic destined for the proxy (`host_ip:proxy_port`) /// 2. ACCEPT loopback traffic /// 3. ACCEPT established/related connections (response packets) /// 4. LOG + REJECT all other TCP/UDP traffic (bypass attempts) @@ -251,22 +253,19 @@ impl NetworkNamespace { /// diagnostic logging. pub fn install_bypass_rules(&self, proxy_port: u16) -> Result<()> { // Check if iptables is available before attempting to install rules. 
- let iptables_path = match find_iptables() { - Some(path) => path, - None => { - openshell_ocsf::ocsf_emit!(openshell_ocsf::ConfigStateChangeBuilder::new( - crate::ocsf_ctx() - ) - .severity(openshell_ocsf::SeverityId::Medium) - .status(openshell_ocsf::StatusId::Failure) - .state(openshell_ocsf::StateId::Disabled, "degraded") - .message(format!( - "iptables not found; bypass detection rules will not be installed [ns:{}]", - self.name - )) - .build()); - return Ok(()); - } + let Some(iptables_path) = find_iptables() else { + openshell_ocsf::ocsf_emit!( + openshell_ocsf::ConfigStateChangeBuilder::new(crate::ocsf_ctx()) + .severity(openshell_ocsf::SeverityId::Medium) + .status(openshell_ocsf::StatusId::Failure) + .state(openshell_ocsf::StateId::Disabled, "degraded") + .message(format!( + "iptables not found; bypass detection rules will not be installed [ns:{}]", + self.name + )) + .build() + ); + return Ok(()); }; let host_ip_str = self.host_ip.to_string(); @@ -299,20 +298,20 @@ impl NetworkNamespace { // Install IPv6 rules — best-effort. // Skip the proxy ACCEPT rule for IPv6 since the proxy address is IPv4. 
- if let Some(ip6_path) = find_ip6tables(&iptables_path) { - if let Err(e) = self.install_bypass_rules_for_v6(&ip6_path, &log_prefix) { - openshell_ocsf::ocsf_emit!(openshell_ocsf::ConfigStateChangeBuilder::new( - crate::ocsf_ctx() - ) - .severity(openshell_ocsf::SeverityId::Low) - .status(openshell_ocsf::StatusId::Failure) - .state(openshell_ocsf::StateId::Other, "degraded") - .message(format!( - "Failed to install IPv6 bypass detection rules (non-fatal) [ns:{}]: {e}", - self.name - )) - .build()); - } + if let Some(ip6_path) = find_ip6tables(&iptables_path) + && let Err(e) = self.install_bypass_rules_for_v6(&ip6_path, &log_prefix) + { + openshell_ocsf::ocsf_emit!( + openshell_ocsf::ConfigStateChangeBuilder::new(crate::ocsf_ctx()) + .severity(openshell_ocsf::SeverityId::Low) + .status(openshell_ocsf::StatusId::Failure) + .state(openshell_ocsf::StateId::Other, "degraded") + .message(format!( + "Failed to install IPv6 bypass detection rules (non-fatal) [ns:{}]: {e}", + self.name + )) + .build() + ); } openshell_ocsf::ocsf_emit!( @@ -755,13 +754,13 @@ fn run_iptables_netns(netns: &str, iptables_cmd: &str, args: &[&str]) -> Result< const IPTABLES_SEARCH_PATHS: &[&str] = &["/usr/sbin/iptables", "/sbin/iptables", "/usr/bin/iptables"]; -/// Returns true if xt extension modules (e.g. xt_comment) cannot be used +/// Returns true if xt extension modules (e.g. `xt_comment`) cannot be used /// via the given iptables binary. /// -/// Some kernels have nf_tables but lack the nft_compat bridge that allows -/// xt extension modules to be used through the nf_tables path (e.g. Jetson +/// Some kernels have `nf_tables` but lack the `nft_compat` bridge that allows +/// xt extension modules to be used through the `nf_tables` path (e.g. Jetson /// Linux 5.15-tegra). This probe detects that condition by attempting to -/// insert a rule using the xt_comment extension. If it fails, xt extensions +/// insert a rule using the `xt_comment` extension. 
If it fails, xt extensions /// are unavailable and the caller should fall back to iptables-legacy. fn xt_extensions_unavailable(iptables_path: &str) -> bool { // Create a temporary probe chain. If this fails (e.g. no CAP_NET_ADMIN), @@ -769,8 +768,7 @@ fn xt_extensions_unavailable(iptables_path: &str) -> bool { let created = Command::new(iptables_path) .args(["-t", "filter", "-N", "_xt_probe"]) .output() - .map(|o| o.status.success()) - .unwrap_or(false); + .is_ok_and(|o| o.status.success()); if !created { return false; @@ -792,8 +790,7 @@ fn xt_extensions_unavailable(iptables_path: &str) -> bool { "ACCEPT", ]) .output() - .map(|o| o.status.success()) - .unwrap_or(false); + .is_ok_and(|o| o.status.success()); // Clean up — best-effort, ignore failures. let _ = Command::new(iptables_path) diff --git a/crates/openshell-sandbox/src/sandbox/linux/seccomp.rs b/crates/openshell-sandbox/src/sandbox/linux/seccomp.rs index dee96d7ce..f61464023 100644 --- a/crates/openshell-sandbox/src/sandbox/linux/seccomp.rs +++ b/crates/openshell-sandbox/src/sandbox/linux/seccomp.rs @@ -7,10 +7,10 @@ //! //! 1. **Socket domain blocks** -- prevent raw/kernel sockets that bypass the proxy //! 2. **Unconditional syscall blocks** -- block syscalls that enable sandbox escape -//! (fileless exec, ptrace, BPF, cross-process memory access, io_uring, mount) +//! (fileless exec, ptrace, BPF, cross-process memory access, `io_uring`, mount) //! 3. **Conditional syscall blocks** -- block dangerous flag combinations on otherwise -//! needed syscalls (execveat+AT_EMPTY_PATH, unshare+CLONE_NEWUSER, -//! seccomp+SET_MODE_FILTER) +//! needed syscalls (`execveat+AT_EMPTY_PATH`, `unshare+CLONE_NEWUSER`, +//! 
`seccomp+SET_MODE_FILTER`) use crate::policy::{NetworkMode, SandboxPolicy}; use miette::{IntoDiagnostic, Result}; @@ -90,6 +90,8 @@ fn build_supervisor_prelude_rules() -> BTreeMap> { } fn set_no_new_privs() -> Result<()> { + // libc/syscall FFI requires unsafe + #[allow(unsafe_code)] let rc = unsafe { libc::prctl(libc::PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) }; if rc != 0 { return Err(miette::miette!( @@ -119,7 +121,7 @@ fn compile_filter( /// /// This is a separate filter from the main one because seccomp BPF cannot /// dereference the `struct clone_args *` pointer that clone3 takes as arg 0, -/// so we cannot selectively block CLONE_NEWUSER. We block clone3 +/// so we cannot selectively block `CLONE_NEWUSER`. We block clone3 /// unconditionally with ENOSYS so glibc falls back to the older clone /// syscall (where flags are a direct register argument and CAN be filtered). /// @@ -140,8 +142,8 @@ fn build_clone3_filter() -> Result { /// 2. Install the main filter second. It blocks further seccomp filter /// installation with `EPERM`, preserving the original hardening intent. fn apply_runtime_filters( - main_filter: seccompiler::BpfProgramRef, - clone3_filter: seccompiler::BpfProgramRef, + main_filter: seccompiler::BpfProgramRef<'_>, + clone3_filter: seccompiler::BpfProgramRef<'_>, ) -> Result<()> { apply_filter(clone3_filter).into_diagnostic()?; apply_filter(main_filter).into_diagnostic()?; @@ -283,6 +285,15 @@ fn add_masked_arg_rule( } #[cfg(test)] +// libc/syscall FFI requires unsafe; these tests fork children and exercise +// blocked syscalls, so unsafe blocks/calls are pervasive. 
+#[allow( + unsafe_code, + unsafe_op_in_unsafe_fn, + unused_unsafe, + clippy::borrow_as_ptr, + trivial_numeric_casts +)] mod tests { use super::*; diff --git a/crates/openshell-sandbox/src/sandbox/mod.rs b/crates/openshell-sandbox/src/sandbox/mod.rs index a153b7b9e..95aeae492 100644 --- a/crates/openshell-sandbox/src/sandbox/mod.rs +++ b/crates/openshell-sandbox/src/sandbox/mod.rs @@ -14,6 +14,9 @@ pub mod linux; /// # Errors /// /// Returns an error if the sandbox cannot be applied. +// On Linux the spawn path uses `prepare`+`enforce` directly; this single-phase +// apply is only invoked from the non-Linux spawn_impl. +#[cfg_attr(target_os = "linux", allow(dead_code))] #[cfg_attr(not(target_os = "linux"), allow(clippy::unnecessary_wraps))] pub fn apply(policy: &SandboxPolicy, workdir: Option<&str>) -> Result<()> { #[cfg(target_os = "linux")] diff --git a/crates/openshell-sandbox/src/ssh.rs b/crates/openshell-sandbox/src/ssh.rs index 34607a04e..9434d0a16 100644 --- a/crates/openshell-sandbox/src/ssh.rs +++ b/crates/openshell-sandbox/src/ssh.rs @@ -1045,6 +1045,9 @@ mod unsafe_pty { } #[allow(unsafe_code)] + // `libc::TIOCSCTTY` is `u32` on macOS/BSD and `u64` on Linux; allow the + // cross-platform conversion so the same expression compiles everywhere. 
+ #[allow(clippy::useless_conversion)] fn set_controlling_tty(fd: RawFd) -> std::io::Result<()> { let rc = unsafe { libc::ioctl(fd, libc::TIOCSCTTY.into(), 0) }; if rc != 0 { @@ -1498,7 +1501,7 @@ mod tests { None, None, // no netns fd #[cfg(target_os = "linux")] - crate::sandbox::linux::prepare( + sandbox::linux::prepare( &SandboxPolicy { version: 0, filesystem: FilesystemPolicy::default(), diff --git a/crates/openshell-server/src/compute/mod.rs b/crates/openshell-server/src/compute/mod.rs index 8b614efbb..df6f88c77 100644 --- a/crates/openshell-server/src/compute/mod.rs +++ b/crates/openshell-server/src/compute/mod.rs @@ -1497,7 +1497,7 @@ fn is_terminal_failure_reason(reason: &str) -> bool { #[cfg(test)] #[derive(Debug, Default)] -pub(crate) struct NoopTestDriver; +pub struct NoopTestDriver; #[cfg(test)] #[tonic::async_trait] @@ -1591,7 +1591,7 @@ impl ComputeDriver for NoopTestDriver { } #[cfg(test)] -pub(crate) async fn new_test_runtime(store: Arc) -> ComputeRuntime { +pub async fn new_test_runtime(store: Arc) -> ComputeRuntime { ComputeRuntime { driver: Arc::new(NoopTestDriver), shutdown_cleanup: None, @@ -1802,8 +1802,8 @@ mod tests { metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { id: id.to_string(), name: format!("session-{id}"), - created_at_ms: 1000000, - labels: std::collections::HashMap::new(), + created_at_ms: 1_000_000, + labels: HashMap::new(), }), sandbox_id: sandbox_id.to_string(), token: format!("token-{id}"), diff --git a/crates/openshell-server/src/grpc/sandbox.rs b/crates/openshell-server/src/grpc/sandbox.rs index 2b6de3b59..ed1b4cdfc 100644 --- a/crates/openshell-server/src/grpc/sandbox.rs +++ b/crates/openshell-server/src/grpc/sandbox.rs @@ -681,7 +681,7 @@ const MAX_COMMAND_STRING_LEN: usize = 256 * 1024; // 256 KiB fn build_remote_exec_command(req: &ExecSandboxRequest) -> Result { let mut parts = Vec::new(); let mut env_entries = req.environment.iter().collect::>(); - env_entries.sort_by(|(a, _), (b, _)| a.cmp(b)); + 
env_entries.sort_by_key(|(a, _)| *a); for (key, value) in env_entries { parts.push(format!("{key}={}", shell_escape(value)?)); } diff --git a/crates/openshell-server/src/persistence/mod.rs b/crates/openshell-server/src/persistence/mod.rs index a62cf58f7..1c926bd4a 100644 --- a/crates/openshell-server/src/persistence/mod.rs +++ b/crates/openshell-server/src/persistence/mod.rs @@ -292,14 +292,14 @@ pub fn current_time_ms() -> PersistenceResult { } fn map_db_error(error: &sqlx::Error) -> PersistenceError { - if let sqlx::Error::Database(db) = error { - if db.is_unique_violation() { - let constraint = db - .constraint() - .map(ToString::to_string) - .or_else(|| infer_sqlite_unique_constraint(db.message())); - return PersistenceError::unique_violation(constraint, Some(db.message().to_string())); - } + if let sqlx::Error::Database(db) = error + && db.is_unique_violation() + { + let constraint = db + .constraint() + .map(ToString::to_string) + .or_else(|| infer_sqlite_unique_constraint(db.message())); + return PersistenceError::unique_violation(constraint, Some(db.message().to_string())); } PersistenceError::Database(error.to_string()) } diff --git a/crates/openshell-server/src/policy_store.rs b/crates/openshell-server/src/policy_store.rs index dffa00ca4..f0a43698e 100644 --- a/crates/openshell-server/src/policy_store.rs +++ b/crates/openshell-server/src/policy_store.rs @@ -8,7 +8,7 @@ use openshell_core::proto::{ }; use prost::Message; -pub(crate) trait PolicyStoreExt { +pub trait PolicyStoreExt { async fn put_policy_revision( &self, id: &str, @@ -257,7 +257,7 @@ impl PolicyStoreExt for Store { } } -pub(crate) fn policy_payload_from_record(record: &PolicyRecord) -> PersistenceResult> { +pub fn policy_payload_from_record(record: &PolicyRecord) -> PersistenceResult> { let policy = ProtoSandboxPolicy::decode(record.policy_payload.as_slice()).map_err(|e| { crate::persistence::PersistenceError::Decode(format!("decode policy payload failed: {e}")) })?; @@ -270,7 +270,7 @@ 
pub(crate) fn policy_payload_from_record(record: &PolicyRecord) -> PersistenceRe .encode_to_vec()) } -pub(crate) fn policy_record_from_parts( +pub fn policy_record_from_parts( id: String, sandbox_id: String, version: i64, @@ -301,9 +301,7 @@ pub(crate) fn policy_record_from_parts( }) } -pub(crate) fn draft_chunk_payload_from_record( - chunk: &DraftChunkRecord, -) -> PersistenceResult> { +pub fn draft_chunk_payload_from_record(chunk: &DraftChunkRecord) -> PersistenceResult> { let proposed_rule = if chunk.proposed_rule.is_empty() { None } else { @@ -320,6 +318,7 @@ pub(crate) fn draft_chunk_payload_from_record( proposed_rule, rationale: chunk.rationale.clone(), security_notes: chunk.security_notes.clone(), + #[allow(clippy::cast_possible_truncation)] // f64->f32 for confidence scores confidence: chunk.confidence as f32, decided_at_ms: chunk.decided_at_ms.unwrap_or(0), host: chunk.host.clone(), @@ -330,7 +329,7 @@ pub(crate) fn draft_chunk_payload_from_record( .encode_to_vec()) } -pub(crate) fn draft_chunk_record_from_parts( +pub fn draft_chunk_record_from_parts( id: String, sandbox_id: String, status: String, diff --git a/crates/openshell-tui/src/app.rs b/crates/openshell-tui/src/app.rs index 2247581b3..1cab7127c 100644 --- a/crates/openshell-tui/src/app.rs +++ b/crates/openshell-tui/src/app.rs @@ -859,11 +859,8 @@ impl App { self.input_mode = InputMode::Command; self.command_input.clear(); } - KeyCode::Char('j') | KeyCode::Down => { - if !self.gateways.is_empty() { - self.gateway_selected = - (self.gateway_selected + 1).min(self.gateways.len() - 1); - } + KeyCode::Char('j') | KeyCode::Down if !self.gateways.is_empty() => { + self.gateway_selected = (self.gateway_selected + 1).min(self.gateways.len() - 1); } KeyCode::Char('k') | KeyCode::Up => { self.gateway_selected = self.gateway_selected.saturating_sub(1); @@ -903,11 +900,8 @@ impl App { self.input_mode = InputMode::Command; self.command_input.clear(); } - KeyCode::Char('j') | KeyCode::Down => { - if 
self.provider_count > 0 { - self.provider_selected = - (self.provider_selected + 1).min(self.provider_count - 1); - } + KeyCode::Char('j') | KeyCode::Down if self.provider_count > 0 => { + self.provider_selected = (self.provider_selected + 1).min(self.provider_count - 1); } KeyCode::Char('k') | KeyCode::Up => { self.provider_selected = self.provider_selected.saturating_sub(1); @@ -915,22 +909,16 @@ impl App { KeyCode::Char('c') => { self.open_create_provider_form(); } - KeyCode::Enter => { - // Fetch and show provider detail. - if self.provider_count > 0 { - self.pending_provider_get = true; - } + // Fetch and show provider detail. + KeyCode::Enter if self.provider_count > 0 => { + self.pending_provider_get = true; } - KeyCode::Char('u') => { - // Open update form for the selected provider. - if self.provider_count > 0 { - self.open_update_provider_form(); - } + // Open update form for the selected provider. + KeyCode::Char('u') if self.provider_count > 0 => { + self.open_update_provider_form(); } - KeyCode::Char('d') => { - if self.provider_count > 0 { - self.confirm_provider_delete = true; - } + KeyCode::Char('d') if self.provider_count > 0 => { + self.confirm_provider_delete = true; } KeyCode::Char('h' | 'l') | KeyCode::Left | KeyCode::Right => { self.middle_pane_tab = self.middle_pane_tab.next(); @@ -948,11 +936,9 @@ impl App { self.input_mode = InputMode::Command; self.command_input.clear(); } - KeyCode::Char('j') | KeyCode::Down => { - if !self.global_settings.is_empty() { - self.global_settings_selected = - (self.global_settings_selected + 1).min(self.global_settings.len() - 1); - } + KeyCode::Char('j') | KeyCode::Down if !self.global_settings.is_empty() => { + self.global_settings_selected = + (self.global_settings_selected + 1).min(self.global_settings.len() - 1); } KeyCode::Char('k') | KeyCode::Up => { self.global_settings_selected = self.global_settings_selected.saturating_sub(1); @@ -1083,10 +1069,8 @@ impl App { self.input_mode = InputMode::Command; 
self.command_input.clear(); } - KeyCode::Char('j') | KeyCode::Down => { - if self.sandbox_count > 0 { - self.sandbox_selected = (self.sandbox_selected + 1).min(self.sandbox_count - 1); - } + KeyCode::Char('j') | KeyCode::Down if self.sandbox_count > 0 => { + self.sandbox_selected = (self.sandbox_selected + 1).min(self.sandbox_count - 1); } KeyCode::Char('k') | KeyCode::Up => { self.sandbox_selected = self.sandbox_selected.saturating_sub(1); @@ -1094,13 +1078,11 @@ impl App { KeyCode::Char('c') => { self.open_create_form(); } - KeyCode::Enter => { - if self.sandbox_count > 0 { - self.screen = Screen::Sandbox; - self.focus = Focus::SandboxPolicy; - self.confirm_delete = false; - self.pending_sandbox_detail = true; - } + KeyCode::Enter if self.sandbox_count > 0 => { + self.screen = Screen::Sandbox; + self.focus = Focus::SandboxPolicy; + self.confirm_delete = false; + self.pending_sandbox_detail = true; } KeyCode::Esc => { self.focus = Focus::Providers; @@ -1151,10 +1133,8 @@ impl App { KeyCode::Char('r') => { self.focus = Focus::SandboxDraft; } - KeyCode::Char('s') => { - if self.sandbox_count > 0 { - self.pending_shell_connect = true; - } + KeyCode::Char('s') if self.sandbox_count > 0 => { + self.pending_shell_connect = true; } KeyCode::Char('d') => { self.confirm_delete = true; @@ -1196,11 +1176,9 @@ impl App { // In policy tab, 'l' opens logs. In settings tab, switch tab. 
self.sandbox_policy_tab = self.sandbox_policy_tab.next(); } - KeyCode::Char('j') | KeyCode::Down => { - if !self.sandbox_settings.is_empty() { - self.sandbox_settings_selected = - (self.sandbox_settings_selected + 1).min(self.sandbox_settings.len() - 1); - } + KeyCode::Char('j') | KeyCode::Down if !self.sandbox_settings.is_empty() => { + self.sandbox_settings_selected = + (self.sandbox_settings_selected + 1).min(self.sandbox_settings.len() - 1); } KeyCode::Char('k') | KeyCode::Up => { self.sandbox_settings_selected = self.sandbox_settings_selected.saturating_sub(1); @@ -1405,10 +1383,8 @@ impl App { self.focus = Focus::SandboxLogs; self.pending_log_fetch = true; } - KeyCode::Enter => { - if !self.draft_chunks.is_empty() { - self.draft_detail_open = true; - } + KeyCode::Enter if !self.draft_chunks.is_empty() => { + self.draft_detail_open = true; } KeyCode::Char('j') | KeyCode::Down => { if total == 0 { @@ -1436,13 +1412,11 @@ impl App { self.draft_scroll = 0; self.draft_selected = 0; } - KeyCode::Char('G') => { - if total > 0 { - let max_scroll = total.saturating_sub(vh.min(total)); - self.draft_scroll = max_scroll; - let visible = total.saturating_sub(self.draft_scroll).min(vh); - self.draft_selected = visible.saturating_sub(1); - } + KeyCode::Char('G') if total > 0 => { + let max_scroll = total.saturating_sub(vh.min(total)); + self.draft_scroll = max_scroll; + let visible = total.saturating_sub(self.draft_scroll).min(vh); + self.draft_selected = visible.saturating_sub(1); } // Approve selected chunk (pending → approved, rejected → approved). 
KeyCode::Char('a') => { @@ -1587,12 +1561,10 @@ impl App { self.log_autoscroll = false; } } - KeyCode::Enter => { - if filtered_len > 0 && self.log_selection_anchor.is_none() { - let abs = self.sandbox_log_scroll + self.log_cursor; - if abs < filtered_len { - self.log_detail_index = Some(abs); - } + KeyCode::Enter if filtered_len > 0 && self.log_selection_anchor.is_none() => { + let abs = self.sandbox_log_scroll + self.log_cursor; + if abs < filtered_len { + self.log_detail_index = Some(abs); } } KeyCode::Char('j') | KeyCode::Down => { @@ -1725,11 +1697,9 @@ impl App { CreateFormField::Image => Self::handle_text_input(&mut form.image, key), CreateFormField::Command => Self::handle_text_input(&mut form.command, key), CreateFormField::Providers => match key.code { - KeyCode::Char('j') | KeyCode::Down => { - if !form.providers.is_empty() { - form.provider_cursor = - (form.provider_cursor + 1).min(form.providers.len() - 1); - } + KeyCode::Char('j') | KeyCode::Down if !form.providers.is_empty() => { + form.provider_cursor = + (form.provider_cursor + 1).min(form.providers.len() - 1); } KeyCode::Char('k') | KeyCode::Up => { form.provider_cursor = form.provider_cursor.saturating_sub(1); @@ -1828,10 +1798,8 @@ impl App { KeyCode::Esc => { self.create_provider_form = None; } - KeyCode::Char('j') | KeyCode::Down => { - if !form.types.is_empty() { - form.type_cursor = (form.type_cursor + 1).min(form.types.len() - 1); - } + KeyCode::Char('j') | KeyCode::Down if !form.types.is_empty() => { + form.type_cursor = (form.type_cursor + 1).min(form.types.len() - 1); } KeyCode::Char('k') | KeyCode::Up => { form.type_cursor = form.type_cursor.saturating_sub(1); diff --git a/crates/openshell-tui/src/event.rs b/crates/openshell-tui/src/event.rs index 66f9dd962..4c6eeb8b4 100644 --- a/crates/openshell-tui/src/event.rs +++ b/crates/openshell-tui/src/event.rs @@ -85,20 +85,14 @@ impl EventHandler { if event::poll(poll_interval).unwrap_or(false) { match event::read() { - 
Ok(TermEvent::Key(key)) => { - if tx.send(Event::Key(key)).is_err() { - return; - } + Ok(TermEvent::Key(key)) if tx.send(Event::Key(key)).is_err() => { + return; } - Ok(TermEvent::Mouse(mouse)) => { - if tx.send(Event::Mouse(mouse)).is_err() { - return; - } + Ok(TermEvent::Mouse(mouse)) if tx.send(Event::Mouse(mouse)).is_err() => { + return; } - Ok(TermEvent::Resize(w, h)) => { - if tx.send(Event::Resize(w, h)).is_err() { - return; - } + Ok(TermEvent::Resize(w, h)) if tx.send(Event::Resize(w, h)).is_err() => { + return; } _ => {} } diff --git a/crates/openshell-vfio/src/lib.rs b/crates/openshell-vfio/src/lib.rs index e7276b301..bc226a78c 100644 --- a/crates/openshell-vfio/src/lib.rs +++ b/crates/openshell-vfio/src/lib.rs @@ -591,7 +591,8 @@ fn bind_device_to_vfio(sysfs: &SysfsRoot, bdf: &str) -> Result bdf: bdf.to_string(), reason: format!( "after drivers_probe with {}ms polling, driver is {:?} instead of vfio-pci", - VFIO_BIND_MAX_POLL_ATTEMPTS as u64 * VFIO_BIND_POLL_INTERVAL.as_millis() as u64, + u64::from(VFIO_BIND_MAX_POLL_ATTEMPTS) + * u64::try_from(VFIO_BIND_POLL_INTERVAL.as_millis()).unwrap_or(u64::MAX), current_driver_name(sysfs, bdf) .as_deref() .unwrap_or("") diff --git a/crates/openshell-vm/build.rs b/crates/openshell-vm/build.rs index f448ed0bc..6351be6e8 100644 --- a/crates/openshell-vm/build.rs +++ b/crates/openshell-vm/build.rs @@ -97,7 +97,7 @@ fn main() { e ) }); - let size = fs::metadata(&dst_path).map(|m| m.len()).unwrap_or(0); + let size = fs::metadata(&dst_path).map_or(0, |m| m.len()); println!("cargo:warning=Embedded {src_name}: {size} bytes"); } else { println!( diff --git a/crates/openshell-vm/src/embedded.rs b/crates/openshell-vm/src/embedded.rs index f019385d6..537e7d725 100644 --- a/crates/openshell-vm/src/embedded.rs +++ b/crates/openshell-vm/src/embedded.rs @@ -417,7 +417,7 @@ fn validate_runtime_dir(dir: &Path) -> Result<(), VmError> { } // Check file is not empty (would indicate a stub) - let size = fs::metadata(path).map(|m| 
m.len()).unwrap_or(0); + let size = fs::metadata(path).map_or(0, |m| m.len()); if size == 0 { return Err(VmError::HostSetup(format!( "runtime file is empty (stub): {}", diff --git a/crates/openshell-vm/src/exec.rs b/crates/openshell-vm/src/exec.rs index 23ab23175..63771509e 100644 --- a/crates/openshell-vm/src/exec.rs +++ b/crates/openshell-vm/src/exec.rs @@ -111,10 +111,10 @@ impl RawModeGuard { let stdin = std::io::stdin(); let fd = stdin.as_fd(); let original = - termios::tcgetattr(&fd).map_err(|e| VmError::Exec(format!("tcgetattr: {e}")))?; + termios::tcgetattr(fd).map_err(|e| VmError::Exec(format!("tcgetattr: {e}")))?; let mut raw = original.clone(); termios::cfmakeraw(&mut raw); - termios::tcsetattr(&fd, SetArg::TCSANOW, &raw) + termios::tcsetattr(fd, SetArg::TCSANOW, &raw) .map_err(|e| VmError::Exec(format!("tcsetattr: {e}")))?; Ok(Self { raw_fd: std::os::unix::io::AsRawFd::as_raw_fd(&stdin), @@ -126,7 +126,7 @@ impl RawModeGuard { impl Drop for RawModeGuard { fn drop(&mut self) { let fd = unsafe { BorrowedFd::borrow_raw(self.raw_fd) }; - let _ = termios::tcsetattr(&fd, SetArg::TCSANOW, &self.original); + let _ = termios::tcsetattr(fd, SetArg::TCSANOW, &self.original); } } @@ -792,19 +792,18 @@ fn pump_stdin(mut writer: UnixStream, tty: bool) -> Result<(), VmError> { break; } - if tty { - if let Some(size) = get_terminal_size() { - if last_size != Some(size) { - last_size = Some(size); - let _ = send_json_line( - &mut writer, - &ClientFrame::Resize { - cols: size.0, - rows: size.1, - }, - ); - } - } + if tty + && let Some(size) = get_terminal_size() + && last_size != Some(size) + { + last_size = Some(size); + let _ = send_json_line( + &mut writer, + &ClientFrame::Resize { + cols: size.0, + rows: size.1, + }, + ); } let frame = ClientFrame::Stdin { @@ -1016,8 +1015,8 @@ mod tests { rows: u16::MAX, }; let json: serde_json::Value = serde_json::to_value(&frame).unwrap(); - assert_eq!(json["cols"], u16::MAX as u64); - assert_eq!(json["rows"], u16::MAX as u64); + 
assert_eq!(json["cols"], u64::from(u16::MAX)); + assert_eq!(json["rows"], u64::from(u16::MAX)); } #[test] @@ -1070,9 +1069,7 @@ mod tests { fn stdin_payload_round_trip() { let original = b"echo hello\n"; let encoded = base64::engine::general_purpose::STANDARD.encode(original); - let frame = ClientFrame::Stdin { - data: encoded.clone(), - }; + let frame = ClientFrame::Stdin { data: encoded }; let json = serde_json::to_string(&frame).unwrap(); let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); let decoded = decode_payload(parsed["data"].as_str().unwrap()).unwrap(); @@ -1103,7 +1100,7 @@ mod tests { }; let v: serde_json::Value = serde_json::to_value(&req).unwrap(); assert!(v["tty"].is_boolean()); - assert_eq!(v["tty"].as_bool().unwrap(), true); + assert!(v["tty"].as_bool().unwrap()); let req_no_tty = ExecRequest { argv: vec!["echo".into()], @@ -1112,7 +1109,7 @@ mod tests { tty: false, }; let v: serde_json::Value = serde_json::to_value(&req_no_tty).unwrap(); - assert_eq!(v["tty"].as_bool().unwrap(), false); + assert!(!v["tty"].as_bool().unwrap()); } #[test] diff --git a/crates/openshell-vm/src/lib.rs b/crates/openshell-vm/src/lib.rs index 99f122e45..b4d8081c1 100644 --- a/crates/openshell-vm/src/lib.rs +++ b/crates/openshell-vm/src/lib.rs @@ -998,9 +998,7 @@ fn is_process_named(pid: libc::pid_t, expected: &str) -> bool { #[cfg(target_os = "linux")] fn is_process_named(pid: libc::pid_t, expected: &str) -> bool { let comm_path = format!("/proc/{pid}/comm"); - std::fs::read_to_string(comm_path) - .map(|name| name.trim().contains(expected)) - .unwrap_or(false) + std::fs::read_to_string(comm_path).is_ok_and(|name| name.trim().contains(expected)) } #[cfg(not(any(target_os = "macos", target_os = "linux")))] From f5a6b08e4f833e572d1cd3bb2687440b13cb1103 Mon Sep 17 00:00:00 2001 From: Drew Newberry Date: Tue, 28 Apr 2026 23:48:33 -0700 Subject: [PATCH 3/4] test(e2e): align upload_respects_gitignore with basename preservation PR #1028 (preserve directory 
basename for filtered uploads) changed the on-disk layout so that filtered directory uploads land under `<dest>/<basename>/...` rather than `<dest>/...`, matching the unfiltered upload semantics. The accompanying e2e test was not updated and now fails because it still expects `tracked.txt` directly under the download root. Update the assertions to look under the preserved `repo/` basename. --- e2e/rust/tests/sync.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/e2e/rust/tests/sync.rs b/e2e/rust/tests/sync.rs index 29ffdc00d..f08f670c5 100644 --- a/e2e/rust/tests/sync.rs +++ b/e2e/rust/tests/sync.rs @@ -261,26 +261,31 @@ async fn upload_respects_gitignore_by_default() { .await .expect("download filtered upload"); + // Filtered uploads of a directory preserve the source basename, so + // contents land under `<dest>/repo/...` (matches `openshell sandbox + // upload <dir>` semantics from the unfiltered path). + let uploaded_root = download_dir.join("repo"); + // tracked.txt should be present. - let tracked = fs::read_to_string(download_dir.join("tracked.txt")) + let tracked = fs::read_to_string(uploaded_root.join("tracked.txt")) .expect("tracked.txt should exist after filtered upload"); assert_eq!(tracked, "i-am-tracked", "tracked.txt content mismatch"); // .gitignore itself should be present (it's tracked). assert!( - download_dir.join(".gitignore").exists(), + uploaded_root.join(".gitignore").exists(), ".gitignore should be uploaded (it's a tracked file)" ); // ignored.log should NOT be present. assert!( - !download_dir.join("ignored.log").exists(), + !uploaded_root.join("ignored.log").exists(), "ignored.log should be filtered out by .gitignore" ); // build/ directory should NOT be present. 
assert!( - !download_dir.join("build").exists(), + !uploaded_root.join("build").exists(), "build/ directory should be filtered out by .gitignore" ); From 7aa2d611cd6241756f4931cd93765bf1b9341285 Mon Sep 17 00:00:00 2001 From: Drew Newberry Date: Wed, 29 Apr 2026 07:47:15 -0700 Subject: [PATCH 4/4] test(sandbox): retry shared-socket ambiguity resolve to fix flake MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Linux-only test `resolve_process_identity_denies_fork_exec_shared_socket_ambiguity` forks a child, exec's `/bin/sleep 30`, polls until `/proc/<pid>/exe` shows `sleep`, then expects `resolve_process_identity` to return an ambiguous-ownership error. Under heavy CI load the child's procfs `exe` symlink can momentarily fail to resolve (os error 2) between the polling loop and the resolve call — observed in PR CI even though `sleep 30` keeps the child alive. Retry the resolve a small bounded number of times (5 attempts, 50 ms apart) on "No such file or directory" errors before failing the assertion. Real ambiguity errors and successful resolutions short-circuit immediately. --- crates/openshell-sandbox/src/proxy.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/crates/openshell-sandbox/src/proxy.rs b/crates/openshell-sandbox/src/proxy.rs index 861905a6b..a4ad694a9 100644 --- a/crates/openshell-sandbox/src/proxy.rs +++ b/crates/openshell-sandbox/src/proxy.rs @@ -4442,7 +4442,24 @@ mod tests { } let cache = BinaryIdentityCache::new(); - let result = resolve_process_identity(std::process::id(), peer_port, &cache); + + // Resolve with a brief retry loop — under heavy CI load the child's + // procfs entry can momentarily fail to resolve even though the loop + // above just verified `/proc/<pid>/exe` pointed at `sleep`. Retry a + // few times before declaring failure so the test is not flaky. 
+ let mut result = resolve_process_identity(std::process::id(), peer_port, &cache); + for _ in 0..5 { + match &result { + Err(err) + if err.reason.contains("No such file or directory") + || err.reason.contains("os error 2") => + { + std::thread::sleep(Duration::from_millis(50)); + result = resolve_process_identity(std::process::id(), peer_port, &cache); + } + _ => break, + } + } // libc/syscall FFI requires unsafe #[allow(unsafe_code)]