diff --git a/crates/sandlock-ffi/src/handler/run.rs b/crates/sandlock-ffi/src/handler/run.rs index 8a1bed5..26889ab 100644 --- a/crates/sandlock-ffi/src/handler/run.rs +++ b/crates/sandlock-ffi/src/handler/run.rs @@ -107,15 +107,6 @@ fn block_on_run( handlers: Vec<(i64, FfiHandler)>, interactive: bool, ) -> Option> { - // Use a fresh runtime — sandlock-core already pulls in tokio with - // rt-multi-thread; this matches the pattern used by the existing - // `sandlock_run` path. A panic in an `extern "C"`-reachable path is - // UB, so we report runtime-build failure to the caller via `None` - // instead of unwrapping. - let rt = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .ok()?; let cmd_refs: Vec<&str> = cmd.iter().map(String::as_str).collect(); // Apply `name` via the builder method on a clone — mirrors the // pattern used by `sandlock_run` in lib.rs. A `None` here means @@ -124,7 +115,11 @@ fn block_on_run( Some(n) => sandbox.clone().with_name(n), None => sandbox.clone(), }; - Some(rt.block_on(async move { + // Drives the supervisor on the shared per-thread runtime; see + // `crate::runtime` for why this is `current_thread`. This path is + // reached from `extern "C-unwind"` entry points, so user callback + // panics are intentionally allowed to propagate. 
+ crate::runtime::with_runtime_unwind(|rt| rt.block_on(async move { if interactive { sb.run_interactive_with_extra_handlers(&cmd_refs, handlers).await } else { diff --git a/crates/sandlock-ffi/src/lib.rs b/crates/sandlock-ffi/src/lib.rs index d140c1f..00e4afd 100644 --- a/crates/sandlock-ffi/src/lib.rs +++ b/crates/sandlock-ffi/src/lib.rs @@ -14,6 +14,9 @@ use sandlock_core::{Sandbox, RunResult}; pub mod handler; pub mod notif_repr; +mod runtime; + +use runtime::{block_on_runtime, build_live_runtime, build_runtime, with_runtime}; // ---------------------------------------------------------------- // Opaque wrapper types @@ -691,17 +694,13 @@ pub unsafe extern "C" fn sandlock_run( let args = read_argv(argv, argc); let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return ptr::null_mut(), - }; let mut sb = match name { Some(ref n) => policy.clone().with_name(n.clone()), None => policy.clone(), }; - match rt.block_on(sb.run(&arg_refs)) { - Ok(result) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), - Err(_) => ptr::null_mut(), + match with_runtime(|rt| rt.block_on(sb.run(&arg_refs))) { + Some(Ok(result)) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), + _ => ptr::null_mut(), } } @@ -710,7 +709,10 @@ pub unsafe extern "C" fn sandlock_run( // ---------------------------------------------------------------- /// Opaque handle for a live sandbox. -/// Owns both the Sandbox and the tokio Runtime that drives its supervisor. +/// +/// Owns both the Sandbox and a small Tokio runtime that drives its +/// supervisor. Live handles need a runtime whose spawned tasks keep +/// progressing after `sandlock_start` returns. #[allow(non_camel_case_types)] pub struct sandlock_handle_t { sandbox: Sandbox, @@ -725,12 +727,12 @@ pub struct sandlock_handle_t { /// `policy` must be a valid policy pointer. 
`name` may be NULL to /// auto-generate a sandbox name, or a valid NUL-terminated string. /// `argv` must point to `argc` C strings. -#[no_mangle] -pub unsafe extern "C" fn sandlock_create( +unsafe fn sandlock_create_with_runtime( policy: *const sandlock_sandbox_t, name: *const c_char, argv: *const *const c_char, argc: c_uint, + build_rt: fn() -> Option<tokio::runtime::Runtime>, ) -> *mut sandlock_handle_t { if policy.is_null() || argv.is_null() { return ptr::null_mut(); } let policy = &(*policy)._private; @@ -741,9 +743,9 @@ pub unsafe extern "C" fn sandlock_create( let args = read_argv(argv, argc); let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return ptr::null_mut(), + let rt = match build_rt() { + Some(rt) => rt, + None => return ptr::null_mut(), }; let mut sb = match name { @@ -751,13 +753,44 @@ None => policy.clone(), }; - if rt.block_on(sb.create(&arg_refs)).is_err() { + if !matches!(block_on_runtime(&rt, sb.create(&arg_refs)), Some(Ok(()))) { return ptr::null_mut(); } Box::into_raw(Box::new(sandlock_handle_t { sandbox: sb, runtime: rt })) } +#[no_mangle] +pub unsafe extern "C" fn sandlock_create( + policy: *const sandlock_sandbox_t, + name: *const c_char, + argv: *const *const c_char, + argc: c_uint, +) -> *mut sandlock_handle_t { + sandlock_create_with_runtime(policy, name, argv, argc, build_live_runtime) +} + +/// Create a sandbox handle for immediate start+wait use on the calling +/// FFI thread. Unlike `sandlock_create`, this uses the thread-local +/// `current_thread` runtime and does not create Tokio worker threads. +/// +/// This is intended for blocking one-shot wrappers that call +/// `sandlock_start` and `sandlock_handle_wait*` immediately from the +/// same thread. Long-lived handles should use `sandlock_create` so the +/// supervisor keeps progressing between FFI calls.
+/// +/// # Safety +/// Same constraints as `sandlock_create`. +#[no_mangle] +pub unsafe extern "C" fn sandlock_create_for_run( + policy: *const sandlock_sandbox_t, + name: *const c_char, + argv: *const *const c_char, + argc: c_uint, +) -> *mut sandlock_handle_t { + sandlock_create_with_runtime(policy, name, argv, argc, build_runtime) +} + /// Release a previously `sandlock_create`d child to execve. Returns 0 on /// success, -1 on error. /// @@ -789,9 +822,9 @@ pub unsafe extern "C" fn sandlock_handle_pid(h: *const sandlock_handle_t) -> i32 pub unsafe extern "C" fn sandlock_handle_wait(h: *mut sandlock_handle_t) -> *mut sandlock_result_t { if h.is_null() { return ptr::null_mut(); } let h = &mut *h; - match h.runtime.block_on(h.sandbox.wait()) { - Ok(result) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), - Err(_) => ptr::null_mut(), + match block_on_runtime(&h.runtime, h.sandbox.wait()) { + Some(Ok(result)) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), + _ => ptr::null_mut(), } } @@ -811,19 +844,19 @@ pub unsafe extern "C" fn sandlock_handle_wait_timeout( if timeout_ms == 0 { // No timeout -- same as sandlock_handle_wait. 
- return match h.runtime.block_on(h.sandbox.wait()) { - Ok(result) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), - Err(_) => ptr::null_mut(), + return match block_on_runtime(&h.runtime, h.sandbox.wait()) { + Some(Ok(result)) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), + _ => ptr::null_mut(), }; } let dur = Duration::from_millis(timeout_ms); - match h.runtime.block_on(async { + match block_on_runtime(&h.runtime, async { tokio::time::timeout(dur, h.sandbox.wait()).await }) { - Ok(Ok(result)) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), - Ok(Err(_)) => ptr::null_mut(), - Err(_) => { + Some(Ok(Ok(result))) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), + Some(Ok(Err(_))) | None => ptr::null_mut(), + Some(Err(_)) => { // Timeout -- kill the process and return a timeout result. let _ = h.sandbox.kill(); let result = RunResult::timeout(); @@ -844,7 +877,10 @@ pub unsafe extern "C" fn sandlock_handle_port_mappings( ) -> *mut c_char { if h.is_null() { return ptr::null_mut(); } let h = &*h; - let map = h.runtime.block_on(h.sandbox.port_mappings()); + let map = match block_on_runtime(&h.runtime, h.sandbox.port_mappings()) { + Some(map) => map, + None => return ptr::null_mut(), + }; if map.is_empty() { return ptr::null_mut(); } let json = serde_json::to_string(&map).unwrap_or_default(); match CString::new(json) { @@ -887,17 +923,13 @@ pub unsafe extern "C" fn sandlock_run_interactive( let args = read_argv(argv, argc); let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return -1, - }; let mut sb = match name { Some(ref n) => policy.clone().with_name(n.clone()), None => policy.clone(), }; - match rt.block_on(sb.run_interactive(&arg_refs)) { - Ok(result) => result.code().unwrap_or(-1), - Err(_) => -1, + match with_runtime(|rt| rt.block_on(sb.run_interactive(&arg_refs))) { + Some(Ok(result)) => 
result.code().unwrap_or(-1), + _ => -1, } } @@ -1039,17 +1071,13 @@ pub unsafe extern "C" fn sandlock_dry_run( let args = read_argv(argv, argc); let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return ptr::null_mut(), - }; let mut sb = match name { Some(ref n) => policy.clone().with_name(n.clone()), None => policy.clone(), }; - match rt.block_on(sb.dry_run(&arg_refs)) { - Ok(result) => Box::into_raw(Box::new(sandlock_dry_run_result_t { _private: result })), - Err(_) => ptr::null_mut(), + match with_runtime(|rt| rt.block_on(sb.dry_run(&arg_refs))) { + Some(Ok(result)) => Box::into_raw(Box::new(sandlock_dry_run_result_t { _private: result })), + _ => ptr::null_mut(), } } @@ -1221,14 +1249,9 @@ pub unsafe extern "C" fn sandlock_pipeline_run( None }; - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return ptr::null_mut(), - }; - - match rt.block_on(pipeline.run(timeout)) { - Ok(result) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), - Err(_) => ptr::null_mut(), + match with_runtime(|rt| rt.block_on(pipeline.run(timeout))) { + Some(Ok(result)) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), + _ => ptr::null_mut(), } } @@ -1327,14 +1350,9 @@ pub unsafe extern "C" fn sandlock_gather_run( None }; - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return ptr::null_mut(), - }; - - match rt.block_on(gather.run(timeout)) { - Ok(result) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), - Err(_) => ptr::null_mut(), + match with_runtime(|rt| rt.block_on(gather.run(timeout))) { + Some(Ok(result)) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), + _ => ptr::null_mut(), } } @@ -1625,14 +1643,9 @@ pub unsafe extern "C" fn sandlock_fork( if sb.is_null() { return ptr::null_mut(); } let sb = &mut *sb; - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) 
=> rt, - Err(_) => return ptr::null_mut(), - }; - - match rt.block_on(sb.fork(n)) { - Ok(clones) => Box::into_raw(Box::new(sandlock_fork_result_t { clones })), - Err(_) => ptr::null_mut(), + match with_runtime(|rt| rt.block_on(sb.fork(n))) { + Some(Ok(clones)) => Box::into_raw(Box::new(sandlock_fork_result_t { clones })), + _ => ptr::null_mut(), } } @@ -1676,19 +1689,14 @@ pub unsafe extern "C" fn sandlock_reduce( let args = read_argv(argv, argc); let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return ptr::null_mut(), - }; - let reducer = match name { Some(ref n) => policy.clone().with_name(n.clone()), None => policy.clone(), }; - match rt.block_on(reducer.reduce(&arg_refs, &mut fr.clones.as_mut_slice())) { - Ok(result) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), - Err(_) => ptr::null_mut(), + match with_runtime(|rt| rt.block_on(reducer.reduce(&arg_refs, &mut fr.clones.as_mut_slice()))) { + Some(Ok(result)) => Box::into_raw(Box::new(sandlock_result_t { _private: result })), + _ => ptr::null_mut(), } } @@ -1707,14 +1715,9 @@ pub unsafe extern "C" fn sandlock_wait(sb: *mut Sandbox) -> c_int { if sb.is_null() { return -1; } let sb = &mut *sb; - let rt = match tokio::runtime::Runtime::new() { - Ok(rt) => rt, - Err(_) => return -1, - }; - - match rt.block_on(sb.wait()) { - Ok(r) => r.code().unwrap_or(-1), - Err(_) => -1, + match with_runtime(|rt| rt.block_on(sb.wait())) { + Some(Ok(r)) => r.code().unwrap_or(-1), + _ => -1, } } @@ -1749,9 +1752,9 @@ pub unsafe extern "C" fn sandlock_handle_checkpoint( ) -> *mut sandlock_checkpoint_t { if h.is_null() { return ptr::null_mut(); } let h = &mut *h; - match h.runtime.block_on(h.sandbox.checkpoint()) { - Ok(cp) => Box::into_raw(Box::new(sandlock_checkpoint_t { _private: cp })), - Err(_) => ptr::null_mut(), + match block_on_runtime(&h.runtime, h.sandbox.checkpoint()) { + Some(Ok(cp)) => 
Box::into_raw(Box::new(sandlock_checkpoint_t { _private: cp })), + _ => ptr::null_mut(), + } } diff --git a/crates/sandlock-ffi/src/runtime.rs b/crates/sandlock-ffi/src/runtime.rs new file mode 100644 index 0000000..5aee30a --- /dev/null +++ b/crates/sandlock-ffi/src/runtime.rs @@ -0,0 +1,103 @@ +//! Thread-local Tokio runtime for FFI entry points. +//! +//! Each FFI thread lazily builds a single `current_thread` runtime on +//! first use and reuses it for all subsequent calls on that thread. We +//! prefer `current_thread` over `multi_thread` so the runtime does not +//! eagerly spawn worker threads at construction: that path fails when +//! the FFI is invoked from a multi-threaded host whose seccomp profile +//! blocks `clone3` (Kubernetes `RuntimeDefault` + multi-threaded +//! Python/uvicorn was the original report, issue #47). +//! +//! Live handles are the exception: once `sandlock_start` returns, their +//! supervisor tasks still need to progress between FFI calls, so they use +//! a small multi-thread runtime built by `build_live_runtime`. + +use std::cell::OnceCell; +use std::future::Future; +use std::io; +use std::panic::{catch_unwind, AssertUnwindSafe}; +use tokio::runtime::{Builder, Runtime}; + +thread_local! { + static RT: OnceCell<Runtime> = const { OnceCell::new() }; +} + +/// Build a fresh `current_thread` Tokio runtime. Logs the error to +/// stderr on failure so environment-incompatibility cases (e.g. seccomp +/// blocking a syscall the runtime needs at startup) surface to the +/// caller instead of being swallowed into a NULL pointer return. +pub(crate) fn build_runtime() -> Option<Runtime> { + match Builder::new_current_thread().enable_all().build() { + Ok(rt) => Some(rt), + Err(e) => { + log_build_error(&e); + None + } + } +} + +/// Build a runtime for live handles returned by `sandlock_create`.
+/// +/// Unlike the shared `current_thread` runtime, this must keep spawned +/// supervisor tasks running after an FFI call returns; Tokio suspends +/// `current_thread` tasks whenever `block_on` exits. +pub(crate) fn build_live_runtime() -> Option<Runtime> { + match Builder::new_multi_thread() + .worker_threads(1) + .enable_all() + .build() + { + Ok(rt) => Some(rt), + Err(e) => { + log_build_error(&e); + None + } + } +} + +/// Drive `future` on a runtime, converting runtime panics into FFI-level +/// failure. This keeps panics such as Tokio blocking-pool startup +/// failures from unwinding across `extern "C"` entry points. +pub(crate) fn block_on_runtime<R>(rt: &Runtime, future: impl Future<Output = R>) -> Option<R> { + match catch_unwind(AssertUnwindSafe(|| rt.block_on(future))) { + Ok(result) => Some(result), + Err(_) => { + log_runtime_panic(); + None + } + } +} + +/// Run `f` with this thread's shared runtime. Returns `None` if the +/// runtime could not be built on first use. Panics are converted to +/// `None`, so this helper is suitable for `extern "C"` entry points. +pub(crate) fn with_runtime<R>(f: impl FnOnce(&Runtime) -> R) -> Option<R> { + match catch_unwind(AssertUnwindSafe(|| with_runtime_unwind(f))) { + Ok(result) => result, + Err(_) => { + log_runtime_panic(); + None + } + } +} + +/// Run `f` with this thread's shared runtime without catching panics. +/// Use this only from `extern "C-unwind"` entry points that intentionally +/// allow user callback panics to propagate.
+pub(crate) fn with_runtime_unwind<R>(f: impl FnOnce(&Runtime) -> R) -> Option<R> { + RT.with(|cell| { + if cell.get().is_none() { + let rt = build_runtime()?; + let _ = cell.set(rt); + } + Some(f(cell.get().expect("runtime initialised above"))) + }) +} + +fn log_build_error(e: &io::Error) { + eprintln!("sandlock: failed to build tokio runtime: {e}"); +} + +fn log_runtime_panic() { + eprintln!("sandlock: tokio runtime panicked while driving an FFI call"); +} diff --git a/python/src/sandlock/_sdk.py b/python/src/sandlock/_sdk.py index 2659080..62ed124 100644 --- a/python/src/sandlock/_sdk.py +++ b/python/src/sandlock/_sdk.py @@ -232,6 +232,9 @@ def confine(policy: "PolicyDataclass") -> None: _lib.sandlock_create.restype = _c_handle_p _lib.sandlock_create.argtypes = [_c_policy_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p), ctypes.c_uint] +_lib.sandlock_create_for_run.restype = _c_handle_p +_lib.sandlock_create_for_run.argtypes = [_c_policy_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p), ctypes.c_uint] + _lib.sandlock_start.restype = ctypes.c_int _lib.sandlock_start.argtypes = [_c_handle_p] diff --git a/python/src/sandlock/sandbox.py b/python/src/sandlock/sandbox.py index 6efb578..e07c373 100644 --- a/python/src/sandlock/sandbox.py +++ b/python/src/sandlock/sandbox.py @@ -485,7 +485,10 @@ def run(self, cmd: Sequence[str], timeout: float | None = None): resolved_name = self._resolve_name() # Create (parked) so PID is available for pause/resume, then start. - self._handle = _lib.sandlock_create( + # The one-shot run path immediately drives wait on this same Python + # thread, so it can use the FFI current-thread runtime and avoid + # eager Tokio worker-thread creation.
+ self._handle = _lib.sandlock_create_for_run( native.ptr, _encode(resolved_name), argv, argc, ) if not self._handle: diff --git a/python/tests/test_sandbox.py b/python/tests/test_sandbox.py index 3813628..205a6c3 100644 --- a/python/tests/test_sandbox.py +++ b/python/tests/test_sandbox.py @@ -25,6 +25,17 @@ def _policy(**overrides): return Sandbox(**defaults) +def _join_threads_or_fail(threads, timeout: float): + deadline = time.monotonic() + timeout + for thread in threads: + thread.join(timeout=max(0.0, deadline - time.monotonic())) + + alive = [thread.name for thread in threads if thread.is_alive()] + assert not alive, ( + f"threads did not finish within {timeout:g}s: {', '.join(alive)}" + ) + + class TestSandboxRun: def test_simple_command(self): result = _policy().run(["echo", "hello"]) @@ -76,6 +87,126 @@ def test_fs_denied_blocks_read(self, tmp_dir): assert not result.success +class TestSandlockRunCAbiMultiThreaded: + """Regression for issue #47 covering only the C ABI ``sandlock_run`` path. + + Tests here invoke ``_lib.sandlock_run`` directly through ctypes from + multiple threads, then assert all calls succeed and produce the + expected output. The Python ``Sandbox.run()`` user-facing path is + covered by :class:`TestSandboxRunMultiThreaded` below; it uses + ``sandlock_create_for_run`` so the parked handle still exposes + PID/pause/resume during ``run()``. + + Note: these tests assert "concurrent multi-threaded callers do not + deadlock or corrupt each other"; they are not red-on-pristine + against a regression that re-introduces the eager multi-thread + worker-spawn pattern, because glibc transparently falls back from + ``clone3`` to ``clone(2)`` on an unrestricted dev box. The original + failure mode requires a host with ``clone3`` blocked by seccomp + (Kubernetes ``RuntimeDefault``). 
+ """ + + @staticmethod + def _run_via_c_abi(name: str, cmd): + """Invoke ``sandlock_run`` directly, bypassing Python ``Sandbox.run``.""" + import ctypes + from sandlock._sdk import _lib, _make_argv, _read_result_bytes, Result + + sb = Sandbox(name=name, fs_readable=_PYTHON_READABLE) + native = sb._ensure_native() + argv, argc = _make_argv(list(cmd)) + name_b = name.encode("utf-8") + b"\x00" + + result_p = _lib.sandlock_run( + native.ptr, ctypes.c_char_p(name_b), argv, argc, + ) + if not result_p: + return Result(success=False, exit_code=-1, error="sandlock_run returned NULL") + + exit_code = _lib.sandlock_result_exit_code(result_p) + success = _lib.sandlock_result_success(result_p) + stdout = _read_result_bytes(result_p, _lib.sandlock_result_stdout_bytes) + stderr = _read_result_bytes(result_p, _lib.sandlock_result_stderr_bytes) + _lib.sandlock_result_free(result_p) + return Result( + success=bool(success), exit_code=exit_code, + stdout=stdout, stderr=stderr, + ) + + def test_concurrent_sandlock_run_from_many_threads(self): + N = 8 + results = [None] * N + errors = [None] * N + + def worker(i: int): + try: + results[i] = self._run_via_c_abi( + f"issue47-cabi-{i}", ["echo", f"hello from thread {i}"], + ) + except Exception as e: + errors[i] = e + + threads = [ + threading.Thread( + target=worker, + args=(i,), + name=f"issue47-cabi-{i}", + daemon=True, + ) + for i in range(N) + ] + for t in threads: + t.start() + _join_threads_or_fail(threads, timeout=30) + + for i in range(N): + assert errors[i] is None, f"thread {i} raised: {errors[i]}" + assert results[i] is not None, f"thread {i} produced no result" + assert results[i].success, ( + f"thread {i}: success=False exit={results[i].exit_code} " + f"error={results[i].error!r}" + ) + assert f"hello from thread {i}".encode() in results[i].stdout + + +class TestSandboxRunMultiThreaded: + """Regression for issue #47 on the Python user-facing ``Sandbox.run`` path.""" + + def test_concurrent_run_from_many_threads(self): + N 
= 8 + results = [None] * N + errors = [None] * N + + def worker(i: int): + try: + sb = Sandbox(name=f"issue47-python-{i}", fs_readable=_PYTHON_READABLE) + results[i] = sb.run(["echo", f"hello from thread {i}"]) + except Exception as e: + errors[i] = e + + threads = [ + threading.Thread( + target=worker, + args=(i,), + name=f"issue47-python-{i}", + daemon=True, + ) + for i in range(N) + ] + for t in threads: + t.start() + _join_threads_or_fail(threads, timeout=30) + + for i in range(N): + assert errors[i] is None, f"thread {i} raised: {errors[i]}" + assert results[i] is not None, f"thread {i} produced no result" + assert results[i].success, ( + f"thread {i}: success=False exit={results[i].exit_code} " + f"error={results[i].error!r}" + ) + assert f"hello from thread {i}".encode() in results[i].stdout + + class TestPortRemap: """Test transparent TCP port remapping."""