+          <div>
+            <p>
+              Re-import private-upload datamaps that exist on disk but are no longer in your upload history
+              (e.g. after clearing history or reinstalling the app).
+            </p>
+            <button :disabled="rescueScanning" @click="scanOrphans">
+              Scan for orphaned datamaps
+            </button>
+            <p v-if="rescueScanned && orphanDatamaps.length === 0">
+              No orphaned datamaps found.
+            </p>
+            <div v-if="orphanDatamaps.length > 0">
+              <ul>
+                <li v-for="orphan in orphanDatamaps" :key="orphan.path">
+                  <span>{{ orphan.suggested_name }}</span>
+                  <span>{{ orphan.path }}</span>
+                  <span>{{ formatShortDate(orphan.modified_at) }}</span>
+                </li>
+              </ul>
+              <button :disabled="rescueImporting" @click="importOrphans">
+                Import all
+              </button>
+            </div>
+          </div>
@@ -383,6 +437,7 @@ import { isValidEthAddress } from '~/utils/validators'
import { useToastStore } from '~/stores/toasts'
import { useErrorLogStore } from '~/stores/errorlog'
import { useUpdaterStore } from '~/stores/updater'
+import { useFilesStore, type UploadHistoryEntry } from '~/stores/files'
const settingsStore = useSettingsStore()
const walletStore = useWalletStore()
@@ -390,6 +445,7 @@ const nodesStore = useNodesStore()
const toasts = useToastStore()
const errorLogStore = useErrorLogStore()
const updaterStore = useUpdaterStore()
+const filesStore = useFilesStore()
const showAdvanced = ref(false)
const showLog = ref(false)
const appVersion = ref('0.1.0')
@@ -619,5 +675,133 @@ function clearLog() {
toasts.add('Log cleared', 'info')
}
+// ── Rescue Datamaps (V2-195) ──
+
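+// Mirrors the `OrphanDatamap` struct returned by the `scan_orphan_datamaps`
+// Tauri command (src-tauri/src/config.rs).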
+interface OrphanDatamap {
+ path: string
+ suggested_name: string
+ modified_at: string
+}
+
+const rescueScanning = ref(false)
+const rescueScanned = ref(false)
+const rescueImporting = ref(false)
+const orphanDatamaps = ref<OrphanDatamap[]>([])
+
+async function scanOrphans() {
+ rescueScanning.value = true
+ try {
+ if (!filesStore.historyLoaded) {
+ await filesStore.loadHistory()
+ }
+ const knownPaths = filesStore.files
+ .filter(f => f.kind === 'upload' && f.data_map_file)
+ .map(f => f.data_map_file!)
+ orphanDatamaps.value = await invoke<OrphanDatamap[]>('scan_orphan_datamaps', {
+ knownPaths,
+ })
+ rescueScanned.value = true
+ } catch (e: any) {
+ toasts.add(`Scan failed: ${e.message ?? e}`, 'error')
+ } finally {
+ rescueScanning.value = false
+ }
+}
+
+async function importOrphans() {
+ rescueImporting.value = true
+ try {
+ const newEntries: UploadHistoryEntry[] = []
+ for (const orphan of orphanDatamaps.value) {
+ // Read the datamap JSON so we can compute its network address. Without
+ // the address the history row can't participate in re-download flows.
+ let json: string
+ try {
+ json = await invoke<string>('read_datamap_file', { path: orphan.path })
+ } catch {
+ // Skip datamaps we can't read — they stay as orphans for the user
+ // to re-scan later once they've fixed permissions / disk issues.
+ continue
+ }
+ const address = await sha256Hex(json)
+ newEntries.push({
+ name: orphan.suggested_name,
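+ // The original file size isn't recovered from the datamap here, so record 0.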
+ size_bytes: 0,
+ address,
+ cost: null,
+ uploaded_at: orphan.modified_at,
+ data_map_file: orphan.path,
+ })
+ }
+
+ // Append, skipping any address already in history (shouldn't happen since
+ // we filtered by known path, but a computed address could coincidentally
+ // collide with an address we already have from some other path).
+ const existingAddrs = new Set(
+ filesStore.files
+ .filter(f => f.kind === 'upload' && f.address)
+ .map(f => f.address!.toLowerCase()),
+ )
+ const toImport = newEntries.filter(e => !existingAddrs.has(e.address.toLowerCase()))
+
+ if (toImport.length === 0) {
+ toasts.add('No new datamaps to import', 'info')
+ orphanDatamaps.value = []
+ rescueScanned.value = false
+ return
+ }
+
+ // Build the full entries list (existing history + new) and persist.
+ const fullEntries: UploadHistoryEntry[] = [
+ ...filesStore.files
+ .filter(f => f.kind === 'upload' && f.status === 'complete' && f.address)
+ .map(f => ({
+ name: f.name,
+ size_bytes: f.size_bytes,
+ address: f.address!,
+ cost: f.cost ?? null,
+ uploaded_at: f.date,
+ data_map_file: f.data_map_file ?? null,
+ })),
+ ...toImport,
+ ]
+ await invoke('save_upload_history', { entries: fullEntries })
+
+ // Refresh the store so the Files page picks them up immediately.
+ filesStore.historyLoaded = false
+ filesStore.files = filesStore.files.filter(f => f.kind !== 'upload' || f.status !== 'complete')
+ await filesStore.loadHistory()
+
+ toasts.add(`Imported ${toImport.length} datamap${toImport.length === 1 ? '' : 's'}`, 'success')
+ orphanDatamaps.value = []
+ rescueScanned.value = false
+ } catch (e: any) {
+ toasts.add(`Import failed: ${e.message ?? e}`, 'error')
+ } finally {
+ rescueImporting.value = false
+ }
+}
+
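+// Hex-encoded SHA-256 of the datamap JSON, prefixed with `0x`. importOrphans
+// uses it as the rescued entry's `address` and for de-duplication.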
+async function sha256Hex(text: string): Promise<string> {
+ const bytes = new TextEncoder().encode(text)
+ const digest = await crypto.subtle.digest('SHA-256', bytes)
+ const hex = Array.from(new Uint8Array(digest))
+ .map(b => b.toString(16).padStart(2, '0'))
+ .join('')
+ return `0x${hex}`
+}
+
+function formatShortDate(iso: string): string {
+ try {
+ return new Date(iso).toLocaleDateString(undefined, {
+ month: 'short',
+ day: 'numeric',
+ hour: '2-digit',
+ minute: '2-digit',
+ })
+ } catch {
+ return iso
+ }
+}
diff --git a/src-tauri/src/config.rs b/src-tauri/src/config.rs
index d31ac2f..76daf5e 100644
--- a/src-tauri/src/config.rs
+++ b/src-tauri/src/config.rs
@@ -117,6 +117,111 @@ pub(crate) fn config_path() -> PathBuf {
.join("ant-gui")
}
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct OrphanDatamap {
+ /// Absolute path to the .datamap file on disk.
+ pub path: String,
+ /// Basename with the `.datamap` extension stripped — the original upload's
+ /// filename stem. Shown to the user to jog their memory about which file
+ /// this datamap belongs to.
+ pub suggested_name: String,
+ /// File modification time as an ISO-8601 string (UTC). Proxy for "when
+ /// you uploaded this" when the real upload timestamp is gone from history.
+ pub modified_at: String,
+}
+
+/// List `.datamap` files in the app config directory that aren't referenced by
+/// any entry in `known_paths`. Used by the Settings → Advanced rescue flow to
+/// surface datamaps orphaned by a wiped `upload_history.json` so the user can
+/// re-import them and resume downloading.
+pub fn scan_orphan_datamaps(known_paths: &[String]) -> Result<Vec<OrphanDatamap>, String> {
+ let dir = config_path();
+ if !dir.exists() {
+ return Ok(Vec::new());
+ }
+
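+ // Canonicalize the known paths up front so comparison with the canonicalized
+ // on-disk datamap paths below isn't thrown off by symlinks or relative segments.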
+ let known: std::collections::HashSet<PathBuf> = known_paths
+ .iter()
+ .filter_map(|p| std::fs::canonicalize(p).ok())
+ .collect();
+
+ let entries = std::fs::read_dir(&dir).map_err(|e| format!("Failed to read config dir: {e}"))?;
+
+ let mut orphans = Vec::new();
+ for entry in entries.flatten() {
+ let path = entry.path();
+ if path.extension().and_then(|s| s.to_str()) != Some(DATAMAP_EXTENSION) {
+ continue;
+ }
+ let canonical = match std::fs::canonicalize(&path) {
+ Ok(p) => p,
+ Err(_) => continue,
+ };
+ if known.contains(&canonical) {
+ continue;
+ }
+
+ let suggested_name = path
+ .file_stem()
+ .map(|s| s.to_string_lossy().to_string())
+ .unwrap_or_else(|| "unknown".to_string());
+
+ let modified_at = entry
+ .metadata()
+ .ok()
+ .and_then(|m| m.modified().ok())
+ .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
+ .map(|d| {
+ // Format as an ISO-8601 UTC string without pulling in chrono —
+ // just enough precision for a display label.
+ let secs = d.as_secs() as i64;
+ let nanos = d.subsec_nanos();
+ format_iso_utc(secs, nanos)
+ })
+ .unwrap_or_default();
+
+ orphans.push(OrphanDatamap {
+ path: canonical.to_string_lossy().into_owned(),
+ suggested_name,
+ modified_at,
+ });
+ }
+
+ Ok(orphans)
+}
+
+/// Minimal ISO-8601 UTC formatter. Avoids a chrono dep just for a label.
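+/// e.g. `format_iso_utc(0, 0)` returns "1970-01-01T00:00:00.000Z".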
+fn format_iso_utc(secs: i64, nanos: u32) -> String {
+ let (y, mo, d, h, mi, s) = epoch_to_ymdhms(secs);
+ format!(
+ "{y:04}-{mo:02}-{d:02}T{h:02}:{mi:02}:{s:02}.{:03}Z",
+ nanos / 1_000_000
+ )
+}
+
+/// Convert unix epoch seconds to (year, month, day, hour, minute, second)
+/// in UTC. Implements the civil-from-days algorithm so we don't pull in chrono.
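+/// e.g. `epoch_to_ymdhms(86_399)` is (1970, 1, 1, 23, 59, 59).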
+fn epoch_to_ymdhms(secs: i64) -> (i32, u8, u8, u8, u8, u8) {
+ let days = secs.div_euclid(86_400);
+ let seconds_of_day = secs.rem_euclid(86_400) as u32;
+ let h = (seconds_of_day / 3600) as u8;
+ let mi = ((seconds_of_day % 3600) / 60) as u8;
+ let s = (seconds_of_day % 60) as u8;
+
+ // Howard Hinnant's days_from_civil inverse.
+ let z = days + 719_468;
+ let era = if z >= 0 { z } else { z - 146_096 } / 146_097;
+ let doe = (z - era * 146_097) as u32;
+ let yoe = (doe - doe / 1460 + doe / 36_524 - doe / 146_096) / 365;
+ let y = (yoe as i32) + era as i32 * 400;
+ let doy = doe - (365 * yoe + yoe / 4 - yoe / 100);
+ let mp = (5 * doy + 2) / 153;
+ let d = (doy - (153 * mp + 2) / 5 + 1) as u8;
+ let mo = (if mp < 10 { mp + 3 } else { mp - 9 }) as u8;
+ let y = if mo <= 2 { y + 1 } else { y };
+ (y, mo, d, h, mi, s)
+}
+
/// Resolve the OS-appropriate default downloads directory. Returns
/// `~/Downloads` on macOS/Linux and `C:\Users\<username>\Downloads` on Windows,
/// falling back to `/Downloads` if the platform-specific lookup fails.
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 3825843..2fe41d3 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -2,7 +2,7 @@ mod autonomi_ops;
mod config;
use autonomi_ops::AutonomiState;
-use config::{AppConfig, FileMetaResult, UploadHistory, UploadHistoryEntry};
+use config::{AppConfig, FileMetaResult, OrphanDatamap, UploadHistory, UploadHistoryEntry};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::{watch, RwLock};
@@ -572,6 +572,11 @@ fn save_upload_history(entries: Vec<UploadHistoryEntry>) -> Result<(), String> {
history.save().map_err(|e| e.to_string())
}
+#[tauri::command]
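+/// Thin Tauri wrapper so the frontend rescue flow can call
+/// `config::scan_orphan_datamaps` via `invoke`.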
+fn scan_orphan_datamaps(known_paths: Vec<String>) -> Result<Vec<OrphanDatamap>, String> {
+ config::scan_orphan_datamaps(&known_paths)
+}
+
pub fn run() {
// Pipe ant-core / ant-node tracing events to stderr so the dev console
// surfaces upload progress (encrypt → quote → store → finalize). Without
@@ -613,6 +618,7 @@ pub fn run() {
read_file_bytes,
load_upload_history,
save_upload_history,
+ scan_orphan_datamaps,
discover_daemon_url,
ensure_daemon_running,
connect_daemon_sse,