feat: remove blockchain dependency for context management and embed it into the node #2078
feat: remove blockchain dependency for context management and embed it into the node #2078
Conversation
There was a problem hiding this comment.
Cursor Bugbot has reviewed your changes and found 2 potential issues.
Autofix Details
Bugbot Autofix prepared fixes for both issues found in the latest run.
- ✅ Fixed: Join group context fails unconditionally under local governance
- Gated NEAR params and external client calls behind External governance mode; for Local governance, the context must already exist (replicated from peers).
- ✅ Fixed: Nonce allows replay of first op after deletion
- Added check to reject signed group ops for deleted/unknown groups by requiring group metadata to exist before processing ops.
Or push these changes by commenting:
@cursor push bee7e880b8
Preview (bee7e880b8)
diff --git a/crates/context/src/group_store.rs b/crates/context/src/group_store.rs
--- a/crates/context/src/group_store.rs
+++ b/crates/context/src/group_store.rs
@@ -15,13 +15,13 @@
AsKeyParts, ContextGroupRef, ContextIdentity, GroupAlias, GroupContextAlias,
GroupContextAllowlist, GroupContextIndex, GroupContextLastMigration,
GroupContextLastMigrationValue, GroupContextVisibility, GroupContextVisibilityValue,
- GroupDefaultCaps, GroupDefaultCapsValue, GroupDefaultVis, GroupDefaultVisValue, GroupLocalGovNonce,
- GroupMember, GroupMemberAlias, GroupMemberCapability, GroupMemberCapabilityValue, GroupMeta,
- GroupMetaValue, GroupSigningKey, GroupSigningKeyValue, GroupUpgradeKey, GroupUpgradeStatus,
- GroupUpgradeValue, GROUP_CONTEXT_ALLOWLIST_PREFIX, GROUP_CONTEXT_INDEX_PREFIX,
- GROUP_CONTEXT_LAST_MIGRATION_PREFIX, GROUP_CONTEXT_VISIBILITY_PREFIX,
- GROUP_MEMBER_ALIAS_PREFIX, GROUP_MEMBER_CAPABILITY_PREFIX, GROUP_MEMBER_PREFIX,
- GROUP_META_PREFIX, GROUP_SIGNING_KEY_PREFIX, GROUP_UPGRADE_PREFIX,
+ GroupDefaultCaps, GroupDefaultCapsValue, GroupDefaultVis, GroupDefaultVisValue,
+ GroupLocalGovNonce, GroupMember, GroupMemberAlias, GroupMemberCapability,
+ GroupMemberCapabilityValue, GroupMeta, GroupMetaValue, GroupSigningKey, GroupSigningKeyValue,
+ GroupUpgradeKey, GroupUpgradeStatus, GroupUpgradeValue, GROUP_CONTEXT_ALLOWLIST_PREFIX,
+ GROUP_CONTEXT_INDEX_PREFIX, GROUP_CONTEXT_LAST_MIGRATION_PREFIX,
+ GROUP_CONTEXT_VISIBILITY_PREFIX, GROUP_MEMBER_ALIAS_PREFIX, GROUP_MEMBER_CAPABILITY_PREFIX,
+ GROUP_MEMBER_PREFIX, GROUP_META_PREFIX, GROUP_SIGNING_KEY_PREFIX, GROUP_UPGRADE_PREFIX,
};
use calimero_store::Store;
use eyre::{bail, Result as EyreResult};
@@ -255,7 +255,8 @@
let inv_bytes = borsh::to_vec(inv).map_err(|e| eyre::eyre!("borsh: {e}"))?;
let inv_hash = Sha256::digest(&inv_bytes);
let inv_sig_hex = signed_invitation.inviter_signature.trim_start_matches("0x");
- let inv_sig = hex::decode(inv_sig_hex).map_err(|e| eyre::eyre!("inviter signature hex: {e}"))?;
+ let inv_sig =
+ hex::decode(inv_sig_hex).map_err(|e| eyre::eyre!("inviter signature hex: {e}"))?;
let inv_sig_bytes: [u8; 64] = inv_sig
.try_into()
.map_err(|_| eyre::eyre!("inviter signature must be 64 bytes"))?;
@@ -290,6 +291,14 @@
op.verify_signature()
.map_err(|e| eyre::eyre!("signed group op: {e}"))?;
let group_id = ContextGroupId::from(op.group_id);
+
+ // Reject ops for deleted/unknown groups to prevent replay attacks after deletion.
+ // After a group is deleted, nonce records are cleared, so without this check an
+ // old op with nonce=1 would pass since unwrap_or(0) makes last=0 and 1 > 0.
+ if load_group_meta(store, &group_id)?.is_none() {
+ bail!("group '{group_id:?}' does not exist or was deleted");
+ }
+
let last = get_local_gov_nonce(store, &group_id, &op.signer)?.unwrap_or(0);
if op.nonce <= last {
bail!(
@@ -315,7 +324,10 @@
ensure_not_last_admin_demotion(store, &group_id, member, role)?;
add_group_member(store, &group_id, member, role.clone())?;
}
- GroupOp::MemberCapabilitySet { member, capabilities } => {
+ GroupOp::MemberCapabilitySet {
+ member,
+ capabilities,
+ } => {
require_group_admin(store, &group_id, &op.signer)?;
set_member_capability(store, &group_id, member, *capabilities)?;
}
@@ -348,9 +360,7 @@
&op.signer,
MemberCapabilities::CAN_CREATE_CONTEXT,
)? {
- bail!(
- "only group admin or members with CAN_CREATE_CONTEXT can register a context"
- );
+ bail!("only group admin or members with CAN_CREATE_CONTEXT can register a context");
}
register_context_in_group(store, &group_id, context_id)?;
}
@@ -386,7 +396,10 @@
}
}
}
- GroupOp::ContextAllowlistReplaced { context_id, members } => {
+ GroupOp::ContextAllowlistReplaced {
+ context_id,
+ members,
+ } => {
let is_admin = is_group_admin(store, &group_id, &op.signer)?;
if !is_admin {
if let Some((_, creator_bytes)) =
@@ -1933,6 +1946,7 @@
let gid_bytes = gid.to_bytes();
let admin_sk = PrivateKey::random(&mut rng);
let admin_pk = admin_sk.public_key();
+ save_group_meta(&store, &gid, &test_meta()).unwrap();
add_group_member(&store, &gid, &admin_pk, GroupMemberRole::Admin).unwrap();
let member_pk = PrivateKey::random(&mut rng).public_key();
@@ -1951,14 +1965,8 @@
apply_local_signed_group_op(&store, &op1).unwrap();
assert!(check_group_membership(&store, &gid, &member_pk).unwrap());
- let op_dup_nonce = SignedGroupOp::sign(
- &admin_sk,
- gid_bytes,
- None,
- 1,
- GroupOp::Noop,
- )
- .unwrap();
+ let op_dup_nonce =
+ SignedGroupOp::sign(&admin_sk, gid_bytes, None, 1, GroupOp::Noop).unwrap();
assert!(apply_local_signed_group_op(&store, &op_dup_nonce).is_err());
let op2 = SignedGroupOp::sign(&admin_sk, gid_bytes, None, 2, GroupOp::Noop).unwrap();
@@ -2018,7 +2026,9 @@
.unwrap();
apply_local_signed_group_op(&store, &op).unwrap();
assert_eq!(
- get_member_alias(&store, &gid, &member_pk).unwrap().as_deref(),
+ get_member_alias(&store, &gid, &member_pk)
+ .unwrap()
+ .as_deref(),
Some("alice")
);
@@ -2049,7 +2059,9 @@
.unwrap();
apply_local_signed_group_op(&store, &admin_op).unwrap();
assert_eq!(
- get_member_alias(&store, &gid, &member_pk).unwrap().as_deref(),
+ get_member_alias(&store, &gid, &member_pk)
+ .unwrap()
+ .as_deref(),
Some("carol")
);
}
@@ -2088,9 +2100,7 @@
gid_bytes,
None,
1,
- GroupOp::ContextRegistered {
- context_id,
- },
+ GroupOp::ContextRegistered { context_id },
)
.unwrap();
apply_local_signed_group_op(&store, &op_reg).unwrap();
@@ -2110,7 +2120,9 @@
.unwrap();
apply_local_signed_group_op(&store, &op_alias).unwrap();
assert_eq!(
- get_context_alias(&store, &gid, &context_id).unwrap().as_deref(),
+ get_context_alias(&store, &gid, &context_id)
+ .unwrap()
+ .as_deref(),
Some("from-creator")
);
@@ -2148,7 +2160,9 @@
.unwrap();
apply_local_signed_group_op(&store, &op_admin).unwrap();
assert_eq!(
- get_context_alias(&store, &gid, &context_id).unwrap().as_deref(),
+ get_context_alias(&store, &gid, &context_id)
+ .unwrap()
+ .as_deref(),
Some("from-admin")
);
}
@@ -2185,7 +2199,9 @@
.unwrap();
apply_local_signed_group_op(&store, &op_caps).unwrap();
assert_eq!(
- get_member_capability(&store, &gid, &member_m).unwrap().unwrap(),
+ get_member_capability(&store, &gid, &member_m)
+ .unwrap()
+ .unwrap(),
0x7
);
@@ -2201,18 +2217,15 @@
.unwrap();
apply_local_signed_group_op(&store, &op_policy).unwrap();
assert_eq!(
- load_group_meta(&store, &gid).unwrap().unwrap().upgrade_policy,
+ load_group_meta(&store, &gid)
+ .unwrap()
+ .unwrap()
+ .upgrade_policy,
UpgradePolicy::Automatic
);
- let op_del = SignedGroupOp::sign(
- &admin_sk,
- gid_bytes,
- None,
- 3,
- GroupOp::GroupDelete,
- )
- .unwrap();
+ let op_del =
+ SignedGroupOp::sign(&admin_sk, gid_bytes, None, 3, GroupOp::GroupDelete).unwrap();
apply_local_signed_group_op(&store, &op_del).unwrap();
assert!(load_group_meta(&store, &gid).unwrap().is_none());
}
diff --git a/crates/context/src/handlers/join_group_context.rs b/crates/context/src/handlers/join_group_context.rs
--- a/crates/context/src/handlers/join_group_context.rs
+++ b/crates/context/src/handlers/join_group_context.rs
@@ -99,12 +99,14 @@
let context_client = self.context_client.clone();
let node_client = self.node_client.clone();
- let protocol = "near".to_owned();
- let params = match self.external_config.params.get("near") {
- Some(p) => p.clone(),
- None => {
- return ActorResponse::reply(Err(eyre::eyre!("no 'near' protocol config")));
- }
+ let near_params = match group_governance {
+ GroupGovernanceMode::External => match self.external_config.params.get("near") {
+ Some(p) => Some(p.clone()),
+ None => {
+ return ActorResponse::reply(Err(eyre::eyre!("no 'near' protocol config")));
+ }
+ },
+ GroupGovernanceMode::Local => None,
};
ActorResponse::r#async(
@@ -132,27 +134,45 @@
group_store::register_context_in_group(&datastore, &group_id, &context_id)?;
// Ensure we have context config locally.
- // If the context is unknown, build config from protocol params
- // and fetch the proxy contract so sync_context_config can
- // bootstrap the context from on-chain state.
- let config = if !context_client.has_context(&context_id)? {
- let mut external_config = ContextConfigParams {
- protocol: protocol.clone().into(),
- network_id: params.network.clone().into(),
- contract_id: params.contract_id.clone().into(),
- proxy_contract: "".into(),
- application_revision: 0,
- members_revision: 0,
- };
+ // For External governance: if the context is unknown, build config from protocol
+ // params and fetch the proxy contract so sync_context_config can bootstrap the
+ // context from on-chain state.
+ // For Local governance: the context must already exist (replicated from peers).
+ let config = match group_governance {
+ GroupGovernanceMode::External => {
+ if !context_client.has_context(&context_id)? {
+ let params = near_params.as_ref().ok_or_else(|| {
+ eyre::eyre!("near params required for external governance")
+ })?;
+ let mut external_config = ContextConfigParams {
+ protocol: "near".into(),
+ network_id: params.network.clone().into(),
+ contract_id: params.contract_id.clone().into(),
+ proxy_contract: "".into(),
+ application_revision: 0,
+ members_revision: 0,
+ };
- let external_client =
- context_client.external_client(&context_id, &external_config)?;
- let proxy_contract = external_client.config().get_proxy_contract().await?;
- external_config.proxy_contract = proxy_contract.into();
+ let external_client =
+ context_client.external_client(&context_id, &external_config)?;
+ let proxy_contract =
+ external_client.config().get_proxy_contract().await?;
+ external_config.proxy_contract = proxy_contract.into();
- Some(external_config)
- } else {
- None
+ Some(external_config)
+ } else {
+ None
+ }
+ }
+ GroupGovernanceMode::Local => {
+ if !context_client.has_context(&context_id)? {
+ bail!(
+ "context not found locally; wait for context state to replicate \
+ before joining (local governance)"
+ );
+ }
+ None
+ }
};
let _ignored = context_client
This Bugbot Autofix run was free. To enable autofix for future PRs, go to the Cursor dashboard.
There was a problem hiding this comment.
🤖 AI Code Reviewer
Reviewed by 3 agents | Quality score: 93% | Review time: 418.1s
🔴 1 critical, 🟡 3 warnings, 💡 5 suggestions, 📝 1 nitpicks. See inline comments.
🤖 Generated by AI Code Reviewer | Review ID: review-af1768ac
There was a problem hiding this comment.
🤖 AI Code Reviewer
Reviewed by 3 agents | Quality score: 100% | Review time: 512.6s
🔴 1 critical, 🟡 1 warnings, 💡 4 suggestions, 📝 2 nitpicks. See inline comments.
🤖 Generated by AI Code Reviewer | Review ID: review-72efa11c
There was a problem hiding this comment.
🤖 AI Code Reviewer
Reviewed by 3 agents | Quality score: 100% | Review time: 451.2s
🟡 1 warnings. See inline comments.
🤖 Generated by AI Code Reviewer | Review ID: review-bb9544fc
There was a problem hiding this comment.
🤖 AI Code Reviewer
Reviewed by 3 agents | Quality score: 100% | Review time: 418.9s
🟡 2 warnings, 💡 4 suggestions, 📝 2 nitpicks. See inline comments.
🤖 Generated by AI Code Reviewer | Review ID: review-83a3e408
There was a problem hiding this comment.
🤖 AI Code Reviewer
Reviewed by 3 agents | Quality score: 63% | Review time: 376.4s
🟡 4 warnings, 💡 4 suggestions, 📝 1 nitpicks. See inline comments.
🤖 Generated by AI Code Reviewer | Review ID: review-ac13edff
| uses: actions/setup-python@v5 | ||
| with: | ||
| python-version: ${{ inputs.python-version }} | ||
|
|
There was a problem hiding this comment.
🟡 Unpinned git dependency in CI supply chain
Installing merobox from a branch name (feat/signed-open-invitation) without a pinned commit hash allows potential supply chain attacks if the branch is force-pushed with malicious code.
Suggested fix:
Pin to a specific commit hash: `pip install "merobox @ git+https://github.com/calimero-network/merobox.git@<commit-sha>"`
| echo "deb [signed-by=/usr/share/keyrings/merobox.gpg] https://calimero-network.github.io/merobox stable main" | sudo tee /etc/apt/sources.list.d/merobox.list | ||
| sudo apt update | ||
| sudo apt install -y merobox | ||
| run: pip install "merobox @ git+https://github.com/calimero-network/merobox.git@feat/signed-open-invitation" |
There was a problem hiding this comment.
🟡 Pinned git branch reference for merobox installation
Installing from feat/signed-open-invitation branch ties CI to an unpublished feature branch; this will break when the branch is merged or deleted.
Suggested fix:
Pin to a stable release tag or publish to PyPI before merging.
| } | ||
|
|
||
| let dag = self.get_or_create_group_dag(&group_id); | ||
| let mut dag_guard = dag.try_lock().expect("DAG lock uncontended at startup"); |
There was a problem hiding this comment.
🟡 Blocking try_lock().expect() in async startup
Using try_lock().expect() during DAG reload assumes no contention; if startup races with another operation, this will panic.
Suggested fix:
Replace `try_lock().expect()` with async `.lock().await` since this runs at startup within the actor context, or use `try_lock()` with proper error handling.
| @@ -0,0 +1,42 @@ | |||
| # Calimero Core | |||
There was a problem hiding this comment.
💡 New README added but AGENTS.md table references removed relayer crate
The new README correctly omits relayer, but the crates/AGENTS.md still lists relayer | mero-relayer in the Binary Crates table.
Suggested fix:
Update crates/AGENTS.md to remove the relayer entry from the Binary Crates table for consistency.
|
|
||
| ctx.run_interval(std::time::Duration::from_secs(30), move |_act, _ctx| { | ||
| let datastore = datastore.clone(); | ||
| let node_client = node_client.clone(); |
There was a problem hiding this comment.
💡 Heartbeat enumerates all groups every 30s without limit
enumerate_all_groups with usize::MAX scans the entire groups table every 30 seconds; for nodes with many groups this could cause latency spikes.
Suggested fix:
Consider batching heartbeat work across multiple intervals or adding a reasonable limit with pagination state.
| } | ||
| }; | ||
|
|
||
| for (group_id_bytes, _meta) in &groups { |
There was a problem hiding this comment.
💡 reload_group_dags reads entire op log with usize::MAX
At startup, read_op_log_after with usize::MAX loads all historical ops into memory before inserting into DAG; for groups with long histories this could cause memory pressure.
Suggested fix:
Consider streaming ops in batches or limiting the reload to recent ops if older ones are already applied to persistent state.
| let meta = load_group_meta(store, group_id)? | ||
| .ok_or_else(|| eyre::eyre!("group not found for state hash computation"))?; | ||
|
|
||
| let mut members = list_group_members(store, group_id, 0, usize::MAX)?; |
There was a problem hiding this comment.
💡 compute_group_state_hash loads all members without limit
list_group_members with usize::MAX plus sorting all members on every state hash computation could be slow for groups with many members.
Suggested fix:
Consider caching the state hash and invalidating on member changes, or using incremental hashing.
Made-with: Cursor
There was a problem hiding this comment.
Cursor Bugbot has reviewed your changes and found 1 potential issue.
Bugbot Autofix is OFF. To automatically fix reported issues with cloud agents, enable autofix in the Cursor dashboard.
| echo "deb [signed-by=/usr/share/keyrings/merobox.gpg] https://calimero-network.github.io/merobox stable main" | sudo tee /etc/apt/sources.list.d/merobox.list | ||
| sudo apt update | ||
| sudo apt install -y merobox | ||
| run: pip install "merobox @ git+https://github.com/calimero-network/merobox.git@feat/signed-open-invitation" |
There was a problem hiding this comment.
CI installs merobox from ephemeral feature branch
High Severity
The setup-merobox action now installs merobox via pip install from a git feature branch (feat/signed-open-invitation). Once that branch is merged or deleted in the merobox repository, every workflow that uses this action (e2e-rust-apps, e2e-rust-apps-release, xcall, etc.) will fail to install merobox and break CI entirely. The description also incorrectly says "Setup Python and install merobox from PyPI" when it actually installs from git.
There was a problem hiding this comment.
🤖 AI Code Reviewer
Reviewed by 3 agents | Quality score: 71% | Review time: 324.4s
🟡 3 warnings, 💡 1 suggestions. See inline comments.
🤖 Generated by AI Code Reviewer | Review ID: review-ea853594
| with: | ||
| python-version: ${{ inputs.python-version }} | ||
|
|
||
| - name: Install merobox from git |
There was a problem hiding this comment.
🟡 Installing from unstable feature branch in CI
Hardcoding feat/signed-open-invitation branch makes CI fragile—branch deletion or force-push will break builds.
Suggested fix:
Use a tagged release, commit SHA, or at minimum a stable branch like `main`. Consider pinning: `pip install 'merobox @ git+...@v1.2.3'` or `...@<commit-sha>`.
| python-version: ${{ inputs.python-version }} | ||
|
|
||
| - name: Install merobox from git | ||
| shell: bash |
There was a problem hiding this comment.
🟡 Unpinned git dependency in CI pipeline
Installing merobox from a feature branch (feat/signed-open-invitation) without commit pinning allows supply chain attacks if that branch is force-pushed.
Suggested fix:
Pin to a specific commit hash: `pip install "merobox @ git+https://github.com/calimero-network/merobox.git@<commit-sha>"`
| uses: actions/setup-python@v5 | ||
| with: | ||
| python-version: ${{ inputs.python-version }} | ||
|
|
There was a problem hiding this comment.
🟡 CI depends on unstable feature branch
Installing merobox from feat/signed-open-invitation branch rather than a stable release/tag; if that branch is force-pushed or deleted, CI will break.
Suggested fix:
Pin to a specific commit SHA or wait for the feature to be merged to main/released before updating CI.
| @@ -74,9 +74,6 @@ jobs: | |||
| - name: Setup merobox | |||
There was a problem hiding this comment.
💡 Removed contract download but no replacement test coverage for local governance
Removing Download Contracts step is correct for local governance, but the workflow doesn't add any validation that local governance initialization works correctly.
Suggested fix:
Consider adding a simple smoke test step that verifies local governance mode initializes properly (e.g., check config.toml output).
There was a problem hiding this comment.
Cursor Bugbot has reviewed your changes and found 1 potential issue.
Bugbot Autofix is OFF. To automatically fix reported issues with cloud agents, enable autofix in the Cursor dashboard.
| echo "deb [signed-by=/usr/share/keyrings/merobox.gpg] https://calimero-network.github.io/merobox stable main" | sudo tee /etc/apt/sources.list.d/merobox.list | ||
| sudo apt update | ||
| sudo apt install -y merobox | ||
| run: pip install "merobox @ git+https://github.com/calimero-network/merobox.git@feat/signed-open-invitation" |
There was a problem hiding this comment.
CI installs merobox from unpinned feature branch
Medium Severity
The setup-merobox action installs merobox from an unpinned feature branch (feat/signed-open-invitation) rather than a tagged release or pinned commit. If this branch is force-pushed, rebased, or deleted, all CI workflows that use this action (e2e-rust-apps, e2e-rust-apps-release) will break. Also, the action description says "from PyPI" but actually installs from git.
There was a problem hiding this comment.
🤖 AI Code Reviewer
Reviewed by 3 agents | Quality score: 62% | Review time: 546.5s
🟡 4 warnings, 💡 4 suggestions, 📝 1 nitpicks. See inline comments.
🤖 Generated by AI Code Reviewer | Review ID: review-e855df73
| uses: actions/setup-python@v5 | ||
| with: | ||
| python-version: ${{ inputs.python-version }} | ||
|
|
There was a problem hiding this comment.
🟡 Pinned to feature branch for CI dependency
Installing merobox from @feat/signed-open-invitation branch pins CI to an unreleased branch. If that branch is rebased or deleted, CI will fail.
Suggested fix:
Use a tagged release, a commit SHA, or ensure the branch is protected and stable before merge.
| with: | ||
| python-version: ${{ inputs.python-version }} | ||
|
|
||
| - name: Install merobox from git |
There was a problem hiding this comment.
🟡 pip install from unpinned git branch
Installing merobox from feat/signed-open-invitation branch via pip lacks version pinning and hash verification. A compromised branch could inject malicious code into CI.
Suggested fix:
Pin to a specific commit SHA instead of a branch name, e.g., `pip install "merobox @ git+https://...@COMMIT_SHA"`, or use a tagged release.
| require_group_admin(store, &group_id, &op.signer)?; | ||
| ensure_not_last_admin_removal(store, &group_id, member)?; | ||
|
|
||
| // Cascade-delete ContextIdentity entries for all contexts in this group. |
There was a problem hiding this comment.
🟡 Unbounded context enumeration during member removal
enumerate_group_contexts(store, &group_id, 0, usize::MAX) loads all contexts into memory during member removal cascade. For groups with many contexts, this is unbounded memory allocation.
Suggested fix:
Process contexts in batches (e.g., 1000 at a time) to bound memory usage, similar to the `delete_group_local_rows` batching pattern.
| @@ -66,29 +66,32 @@ impl Handler<AddGroupMembersRequest> for ContextManager { | |||
| .ok() | |||
| .flatten() | |||
| }); | |||
| let group_client_result = effective_signing_key.map(|sk| self.group_client(group_id, sk)); | |||
| let members = members.clone(); | |||
There was a problem hiding this comment.
🟡 Sequential async calls for batch member additions
Adding N members makes N sequential sign_apply_and_publish calls, each awaiting network I/O. This is O(N) latency-wise when a batched approach could reduce round-trips.
Suggested fix:
Consider batching member additions into a single signed op with multiple members, or using `futures::future::join_all` for parallel publishing after sequential signing.
| let _ = remove_all_member_context_joins(store, group_id, pk); | ||
| } | ||
|
|
||
| loop { |
There was a problem hiding this comment.
💡 Repeated list_group_members calls in deletion loop
The loop calls list_group_members repeatedly with offset 0 until empty, which re-scans from the beginning each iteration. The scan work is O(N) per batch.
Suggested fix:
Use an iterator-based approach that tracks position, or delete all members after collecting them once.
|
|
||
| for (group_id_bytes, _meta) in &groups { | ||
| let group_id = ContextGroupId::from(*group_id_bytes); | ||
| let entries = |
There was a problem hiding this comment.
💡 Unbounded op log read during DAG reload
read_op_log_after(&self.datastore, &group_id, 0, usize::MAX) loads entire op history into memory at startup. Long-running groups may have large op logs.
Suggested fix:
Consider streaming ops or loading in bounded batches, rebuilding DAG incrementally.
|
|
||
| let Some(external_config) = self.context_config(context_id)? else { | ||
| return Ok(None); | ||
| let secret_salt = { |
There was a problem hiding this comment.
💡 System time used for invitation expiration
Using SystemTime::now() for invitation expiration is susceptible to clock skew between nodes. An attacker on a node with skewed time could exploit expired or prematurely-valid invitations.
Suggested fix:
Consider using a monotonic clock or block height for stricter expiration enforcement, or document the expected tolerance for clock drift.
| let datastore = self.datastore.clone(); | ||
| let node_client = self.node_client.clone(); | ||
|
|
||
| ctx.run_interval(std::time::Duration::from_secs(30), move |_act, _ctx| { |
There was a problem hiding this comment.
💡 Heartbeat enumerates all groups every 30 seconds
enumerate_all_groups(&datastore, 0, usize::MAX) in the heartbeat interval scans all groups every 30s. For deployments with many groups, this adds recurring I/O overhead.
Suggested fix:
Consider caching group list with invalidation on mutations, or using a streaming/cursor-based approach.
| @@ -1,16 +1,23 @@ | |||
| name: Setup Merobox | |||
| description: "Install merobox from APT repository (Linux only)" | |||
| description: "Setup Python and install merobox from PyPI" | |||
There was a problem hiding this comment.
📝 Nit: Description says PyPI but installs from git
The description states 'install merobox from PyPI' but the step actually installs from a git repository.
Suggested fix:
Update description to 'Setup Python and install merobox from git'.



Summary
Implements local group governance (signed gossip →
group_store) with group_governance = local, and makes the context client relayer signer optional so merod init --group-governance local does not write a relayer URL.
Highlights
ClientSigner.relayer is Option; Option<RelayerTransport> implements Transport. --group-governance local omits NEAR protocol blocks and relayer in generated config. calimero-context-config unit test (--features client); CI step; merod integration tests for local vs external init.
R3 roadmap
See §11.6 in
docs/context-management/LOCAL-GROUP-GOVERNANCE.md for the next passes (parity sign-off, cargo tree audit, minimal/no-chain features, downstream inventory, CI guardrail).
Checklist for reviewers: CI green; spot-check
merod init local vs external config.toml.
Made with Cursor
Note
Medium Risk
Moderate risk because it removes NEAR/relayer-related crates, apps, and CI workflows, which can break downstream builds/releases and any remaining code paths that assumed blockchain-backed context management.
Overview
Shifts context management away from blockchain/relayer plumbing by removing NEAR-related dependencies/crates/apps (including
mero-relayer) and pruning workspace members/features accordingly (plus corresponding lockfile updates). CI and release automation are updated to match: blockchain E2E workflows and relayer container publishing/cleanup are removed, Rust CI adds targeted
cargo check coverage (including merod --no-default-features), and Merobox setup is switched from an APT install to Python + pip install from a pinned git ref. Documentation/UX is refreshed with a new repo README.md, an updated CHANGELOG.md, and a new GitHub Pages workflow that deploys the architecture/ site.