From ef9b316f5a0a2e7dd4aef09a50e7305690566ce5 Mon Sep 17 00:00:00 2001 From: idimov-keeper <78815270+idimov-keeper@users.noreply.github.com> Date: Thu, 26 Feb 2026 22:09:23 -0600 Subject: [PATCH 01/15] Added inspect_resource_in_graph functionality (#1841) Bugfix: jit/ai encryption settings --- .../commands/pam_import/keeper_ai_settings.py | 187 +++++++++++++----- 1 file changed, 137 insertions(+), 50 deletions(-) diff --git a/keepercommander/commands/pam_import/keeper_ai_settings.py b/keepercommander/commands/pam_import/keeper_ai_settings.py index 9fbd30b9a..d787f9a41 100644 --- a/keepercommander/commands/pam_import/keeper_ai_settings.py +++ b/keepercommander/commands/pam_import/keeper_ai_settings.py @@ -20,6 +20,7 @@ from ... import vault from ...display import bcolors from ..tunnel.port_forward.tunnel_helpers import get_config_uid, generate_random_bytes, get_keeper_tokens +from ...keeper_dag.crypto import encrypt_aes def list_resource_data_edges( @@ -467,27 +468,6 @@ def set_resource_keeper_ai_settings( logging.warning(f"Resource vertex {resource_uid} not found in DAG") return False - # Ensure the vertex keychain has the record key for encryption - # The DAG save will use vertex.key (first key in keychain) to encrypt DATA edges - if not resource_vertex.keychain or len(resource_vertex.keychain) == 0: - resource_vertex.keychain = [record_key] - else: - # Ensure record key is in keychain (prepend it so it's the first/primary key) - keychain = resource_vertex.keychain - if record_key not in keychain: - keychain.insert(0, record_key) - resource_vertex.keychain = keychain - - # Ensure there is a KEY edge so DATA edges can be added/encrypted. - # Prefer existing parent vertices; fall back to root if none exist. 
- if not resource_vertex.has_key: - parent_vertices = resource_vertex.belongs_to_vertices() - if parent_vertices: - resource_vertex.belongs_to(parent_vertices[0], edge_type=EdgeType.KEY) - else: - resource_vertex.belongs_to_root(EdgeType.KEY) - logging.debug(f"Added KEY edge for resource {resource_uid} to enable DATA encryption") - # Find and deactivate existing 'ai_settings' edge for proper versioning existing_edge = None for edge in resource_vertex.edges: @@ -502,12 +482,16 @@ def set_resource_keeper_ai_settings( existing_edge.active = False logging.debug(f"Deactivated existing 'ai_settings' edge (version {existing_edge.version})") - # Add new DATA edge with the settings - # The DAG will automatically encrypt it on save using vertex.key (record key) + # Pre-encrypt content with record key (matches Web Vault: encrypted=True, needs_encryption=False) + content_bytes = json.dumps(settings).encode() + encrypted_content = encrypt_aes(content_bytes, record_key) + + # Add new DATA edge with pre-encrypted content resource_vertex.add_data( - content=settings, # Will be serialized to JSON and encrypted on save + content=encrypted_content, path='ai_settings', - needs_encryption=True, + needs_encryption=False, + is_encrypted=True, modified=True ) @@ -609,27 +593,6 @@ def set_resource_jit_settings( logging.warning(f"Resource vertex {resource_uid} not found in DAG") return False - # Ensure the vertex keychain has the record key for encryption - # The DAG save will use vertex.key (first key in keychain) to encrypt DATA edges - if not resource_vertex.keychain or len(resource_vertex.keychain) == 0: - resource_vertex.keychain = [record_key] - else: - # Ensure record key is in keychain (prepend it so it's the first/primary key) - keychain = resource_vertex.keychain - if record_key not in keychain: - keychain.insert(0, record_key) - resource_vertex.keychain = keychain - - # Ensure there is a KEY edge so DATA edges can be added/encrypted. 
- # Prefer existing parent vertices; fall back to root if none exist. - if not resource_vertex.has_key: - parent_vertices = resource_vertex.belongs_to_vertices() - if parent_vertices: - resource_vertex.belongs_to(parent_vertices[0], edge_type=EdgeType.KEY) - else: - resource_vertex.belongs_to_root(EdgeType.KEY) - logging.debug(f"Added KEY edge for resource {resource_uid} to enable DATA encryption") - # Find and deactivate existing 'jit_settings' edge for proper versioning existing_edge = None for edge in resource_vertex.edges: @@ -644,12 +607,16 @@ def set_resource_jit_settings( existing_edge.active = False logging.debug(f"Deactivated existing 'jit_settings' edge (version {existing_edge.version})") - # Add new DATA edge with the settings - # The DAG will automatically encrypt it on save using vertex.key (record key) + # Pre-encrypt content with record key (matches Web Vault: encrypted=True, needs_encryption=False) + content_bytes = json.dumps(settings).encode() + encrypted_content = encrypt_aes(content_bytes, record_key) + + # Add new DATA edge with pre-encrypted content resource_vertex.add_data( - content=settings, # Will be serialized to JSON and encrypted on save + content=encrypted_content, path='jit_settings', - needs_encryption=True, + needs_encryption=False, + is_encrypted=True, modified=True ) @@ -844,3 +811,123 @@ def print_keeper_ai_settings(params: KeeperParams, resource_uid: str, config_uid print(f" - {tag_name}") print() + + +def inspect_resource_in_graph( + params: KeeperParams, + resource_uid: str, + config_uid: Optional[str] = None, + show_raw_content: bool = False +) -> Dict[str, Any]: + """ + Inspect all graph edges and vertices referencing a record UID. + Returns edges (tail->head), vertices (UIDs), and DATA edges grouped by path with all versions. 
+ + Args: + params: KeeperParams instance + resource_uid: UID of the PAM resource + config_uid: Optional PAM config UID + show_raw_content: If True, load DAG with decrypt=False and include raw stored content + (encrypted bytes) in data_by_path. Use this to see what's actually stored without + auto-decrypt skewing the picture. + + Returns: + { + "edges": [{"type": str, "tail": str, "head": str, "path": str|None, "version": int, "active": bool}, ...], + "vertices": [uid, ...], + "data_by_path": {"path_name": [{"version": int, "active": bool, "has_content": bool, + "raw_content_len"?: int, "raw_content_preview"?: str}, ...], ...} + } + """ + result: Dict[str, Any] = {"edges": [], "vertices": [], "data_by_path": {}} + try: + vault.KeeperRecord.load(params, resource_uid) + record_key = params.record_cache.get(resource_uid, {}).get('record_key_unencrypted') + if not record_key: + logging.warning(f"Record key not available for {resource_uid}") + return result + + if not config_uid: + encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) + config_uid = get_config_uid(params, encrypted_session_token, encrypted_transmission_key, resource_uid) + if not config_uid: + config_uid = resource_uid + + vault.KeeperRecord.load(params, config_uid) + config_record_key = params.record_cache.get(config_uid, {}).get('record_key_unencrypted') + if not config_record_key: + logging.warning(f"Config record key not available for {config_uid}") + return result + + encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) + dag_record = PasswordRecord() + dag_record.record_uid = config_uid + dag_record.record_key = config_record_key + + conn = Connection( + params=params, + encrypted_transmission_key=encrypted_transmission_key, + encrypted_session_token=encrypted_session_token, + transmission_key=transmission_key, + use_write_protobuf=True + ) + linking_dag = DAG( + conn=conn, + record=dag_record, + graph_id=0, + 
write_endpoint=PamEndpoints.PAM, + decrypt=not show_raw_content + ) + linking_dag.load() + + # 1) All edges referencing record_uid (tail==ruid or head_uid==ruid) + edge_records = [] + vertex_uids = {resource_uid} + + for vertex in linking_dag.all_vertices: + tail_uid = vertex.uid + for edge in (vertex.edges or []): + if not edge: + continue + head_uid = edge.head_uid + if tail_uid != resource_uid and head_uid != resource_uid: + continue + vertex_uids.add(tail_uid) + vertex_uids.add(head_uid) + edge_records.append({ + "type": edge.edge_type.value if hasattr(edge.edge_type, 'value') else str(edge.edge_type), + "tail": tail_uid, + "head": head_uid, + "path": edge.path, + "version": getattr(edge, 'version', 0), + "active": getattr(edge, 'active', True), + }) + if edge.edge_type == EdgeType.DATA: + path_key = edge.path or "(no path)" + if path_key not in result["data_by_path"]: + result["data_by_path"][path_key] = [] + entry = { + "version": getattr(edge, 'version', 0), + "active": getattr(edge, 'active', True), + "has_content": edge.content is not None, + } + if show_raw_content and edge.content is not None: + raw = edge.content + if isinstance(raw, bytes): + entry["raw_content_len"] = len(raw) + # First 64 bytes as hex for encrypted blob preview + entry["raw_content_preview"] = raw[:64].hex() + else: + s = str(raw) + entry["raw_content_len"] = len(s) + entry["raw_content_preview"] = s[:128] + ("..." 
if len(s) > 128 else "") + result["data_by_path"][path_key].append(entry) + + result["edges"] = edge_records + result["vertices"] = sorted(vertex_uids) + return result + + except Exception as e: + logging.error(f"Error inspecting graph for {resource_uid}: {e}", exc_info=True) + result["error"] = str(e) + return result From 24651ff86f9b0a75b83b5fd791bf0b5f0efcd349 Mon Sep 17 00:00:00 2001 From: idimov-keeper <78815270+idimov-keeper@users.noreply.github.com> Date: Wed, 4 Mar 2026 17:35:11 -0600 Subject: [PATCH 02/15] Added new pam action debug command that dumps all record related data (JSON) (#1843) --- keepercommander/commands/discoveryrotation.py | 7 +- keepercommander/commands/pam_debug/dump.py | 368 ++++++++++++++++++ 2 files changed, 373 insertions(+), 2 deletions(-) create mode 100644 keepercommander/commands/pam_debug/dump.py diff --git a/keepercommander/commands/discoveryrotation.py b/keepercommander/commands/discoveryrotation.py index 627099419..b4db65add 100644 --- a/keepercommander/commands/discoveryrotation.py +++ b/keepercommander/commands/discoveryrotation.py @@ -67,11 +67,12 @@ from .discover.rule_remove import PAMGatewayActionDiscoverRuleRemoveCommand from .discover.rule_update import PAMGatewayActionDiscoverRuleUpdateCommand from .pam_debug.acl import PAMDebugACLCommand +from .pam_debug.dump import PAMDebugDumpCommand +from .pam_debug.gateway import PAMDebugGatewayCommand from .pam_debug.graph import PAMDebugGraphCommand from .pam_debug.info import PAMDebugInfoCommand -from .pam_debug.gateway import PAMDebugGatewayCommand -from .pam_debug.rotation_setting import PAMDebugRotationSettingsCommand from .pam_debug.link import PAMDebugLinkCommand +from .pam_debug.rotation_setting import PAMDebugRotationSettingsCommand from .pam_debug.vertex import PAMDebugVertexCommand from .pam_import.commands import PAMProjectCommand from .pam_launch.launch import PAMLaunchCommand @@ -310,6 +311,8 @@ def __init__(self): 'Create/reset rotation settings', 'rs') 
self.register_command('vertex', PAMDebugVertexCommand(), 'Debug a graph vertex', 'v') + self.register_command('dump', PAMDebugDumpCommand(), + 'Dump folder records data and GraphSync to JSON', 'd') class PAMLegacyCommand(Command): diff --git a/keepercommander/commands/pam_debug/dump.py b/keepercommander/commands/pam_debug/dump.py new file mode 100644 index 000000000..424d1b7b7 --- /dev/null +++ b/keepercommander/commands/pam_debug/dump.py @@ -0,0 +1,368 @@ +from __future__ import annotations +import argparse +import base64 +import datetime +import json +import logging +import pathlib +from typing import Dict, List, Optional, Tuple, TYPE_CHECKING + +from ..base import Command, FolderMixin +from ...subfolder import get_folder_uids +from ... import vault, api +from ...keeper_dag import DAG, EdgeType +from ...keeper_dag.types import PamGraphId +from ..pam_import.keeper_ai_settings import get_resource_settings +from ...keeper_dag.crypto import decrypt_aes +from . import get_connection + +if TYPE_CHECKING: + from ...params import KeeperParams + from ...keeper_dag.dag import DAG as DAGType + + +ALL_GRAPH_IDS = [g.value for g in PamGraphId] + +# DELETION means the edge is absent; UNDENIAL cancels a DENIAL (treated as absent) +_EXCLUDE_EDGE_TYPES = frozenset({EdgeType.DELETION, EdgeType.UNDENIAL}) + + +class PAMDebugDumpCommand(Command): + parser = argparse.ArgumentParser(prog='pam action debug dump') + parser.add_argument('folder_uid', action='store', + help='Folder UID or path. 
Use empty string for the root folder.') + parser.add_argument('--recursive', '-r', required=False, dest='recursive', action='store_true', + help='Include records in all subfolders.') + parser.add_argument('--save-as', '-s', required=True, dest='save_as', action='store', + help='Output file path to save JSON results.') + + def get_parser(self): + return PAMDebugDumpCommand.parser + + def execute(self, params: 'KeeperParams', **kwargs): + folder_uid_arg = kwargs.get('folder_uid', '') + recursive = kwargs.get('recursive', False) + save_as = kwargs.get('save_as') + + def _write_result(data: list) -> None: + p = pathlib.Path(save_as) + if p.exists(): + counter = 1 + while True: + candidate = p.parent / f'{p.stem}.{counter}{p.suffix}' + if not candidate.exists(): + p = candidate + break + counter += 1 + with open(p, 'w', encoding='utf-8') as fh: + fh.write(json.dumps(data, indent=2)) + logging.info('Saved %d record(s) to %s', len(data), p) + + # 1. Resolve folder UID(s) from UID or path + folder_uids = get_folder_uids(params, folder_uid_arg) + if not folder_uids: + logging.warning('Cannot resolve folder: %r', folder_uid_arg) + _write_result([]) + return + + # 2. 
Collect records with folder context + # record_uid → (folder_uid, folder_parent_uid) + record_folder_map: Dict[str, Tuple[str, str]] = {} + + if recursive: + def _on_folder(f): + f_uid = f.uid or '' + f_parent_uid = getattr(f, 'parent_uid', None) or '' + for rec_uid in params.subfolder_record_cache.get(f_uid, set()): + if rec_uid not in record_folder_map: + record_folder_map[rec_uid] = (f_uid, f_parent_uid) + + for fuid in folder_uids: + FolderMixin.traverse_folder_tree(params, fuid, _on_folder) + else: + for fuid in folder_uids: + if fuid: + folder_node = params.folder_cache.get(fuid) + f_parent_uid = getattr(folder_node, 'parent_uid', None) or '' if folder_node else '' + else: + # root folder has no parent + f_parent_uid = '' + for rec_uid in params.subfolder_record_cache.get(fuid, set()): + if rec_uid not in record_folder_map: + record_folder_map[rec_uid] = (fuid, f_parent_uid) + + if not record_folder_map: + _write_result([]) + return + + # 3. Filter by version, then group valid records by config_uid. + # Supported versions: 3 (typed), 5 (KSM App/Gateway), 6 (PAM Configuration). + # Versions 1–2/4 are legacy/attachment records; skip with a warning. + config_to_records: Dict[str, List[str]] = {} + record_config_map: Dict[str, Optional[str]] = {} + valid_uids: List[str] = [] # passed version filter, in discovery order + + for rec_uid in record_folder_map: + rec = params.record_cache.get(rec_uid) + if rec is None: + logging.warning('skipping record %s version unknown - not in record cache', rec_uid) + continue + + version = rec.get('version') + if version is None or version <= 2: + logging.warning( + 'skipping record %s version %s - PAM records have version >= 3', + rec_uid, version + ) + continue + + valid_uids.append(rec_uid) + + # v6 PAM Configuration records ARE their own graph root — no rotation-cache entry exists for them. 
+ if version == 6: + config_to_records.setdefault(rec_uid, []).append(rec_uid) + record_config_map[rec_uid] = rec_uid + continue + + rotation = params.record_rotation_cache.get(rec_uid) + if rotation is not None: + config_uid = rotation.get('configuration_uid') + if config_uid: + config_to_records.setdefault(config_uid, []).append(rec_uid) + record_config_map[rec_uid] = config_uid + continue + + logging.debug('Record %s not found in rotation cache; rotation config unavailable, ', rec_uid) + record_config_map[rec_uid] = None + + if not valid_uids: + _write_result([]) + return + + # 4. Load all 5 DAGs once per config_uid + # keyed by (config_uid, graph_id) + dag_cache: Dict[Tuple[str, int], Optional['DAGType']] = {} + conn = get_connection(params) + + for config_uid in config_to_records: + config_record = vault.KeeperRecord.load(params, config_uid) + if config_record is None: + logging.error('Configuration record %s not found; skipping graph load.', config_uid) + for graph_id in ALL_GRAPH_IDS: + dag_cache[(config_uid, graph_id)] = None + continue + + for graph_id in ALL_GRAPH_IDS: + try: + dag = DAG(conn=conn, record=config_record, graph_id=graph_id, + fail_on_corrupt=False, logger=logging) + dag.load(sync_point=0) + dag_cache[(config_uid, graph_id)] = dag + except Exception as err: + logging.error('Failed to load graph %d for config %s: %s', graph_id, config_uid, err) + dag_cache[(config_uid, graph_id)] = None + + # 5. 
Build per-record output + result = [] + + for rec_uid in valid_uids: + folder_uid, folder_parent_uid = record_folder_map[rec_uid] + rec = params.record_cache[rec_uid] # guaranteed present after step 3 + version = rec.get('version') + shared = rec.get('shared', False) + revision = rec.get('revision', 0) + + client_modified_time = None + cmt = rec.get('client_modified_time') + if isinstance(cmt, (int, float)): + client_modified_time = datetime.datetime.fromtimestamp(int(cmt / 1000)).isoformat() + + metadata = { + 'uid': rec_uid, + 'folder_uid': folder_uid, + 'folder_uid_parent': folder_parent_uid, + 'version': version, + 'shared': shared, + 'client_modified_time': client_modified_time, + 'revision': revision, + } + + # data — same structure as `get --format=json` + data = {} + try: + r = api.get_record(params, rec_uid) + if r: + raw = rec.get('data_unencrypted', b'{}') + data = json.loads(raw.decode() if isinstance(raw, bytes) else raw) + if r.notes: + data['notes'] = r.notes + except Exception as err: + logging.warning('Could not build data for record %s: %s', rec_uid, err) + + # graph_sync — dict keyed by config_uid, then by graph name. + # A record may be referenced by more than one PAM Configuration; we query + # every already-loaded DAG so cross-config references are captured. + # Inner value may contain: + # "vertex_active": bool — present when the record UID is a vertex in that graph + # "edges": [...] — present only when there are active, non-deleted edges + # Config/graph keys are omitted when the record has no presence there. 
+ graph_sync: Dict[str, Dict[str, dict]] = {} + for (c_uid, graph_id), dag in dag_cache.items(): + if dag is None: + continue + try: + graph_entry = _collect_graph_entry(dag, rec_uid, params, c_uid) + if graph_entry: + graph_name = PamGraphId(graph_id).name + graph_sync.setdefault(c_uid, {})[graph_name] = graph_entry + except Exception as err: + logging.warning('Error collecting graph data for record %s graph %d config %s: %s', + rec_uid, graph_id, c_uid, err) + + result.append({ + 'uid': rec_uid, + 'metadata': metadata, + 'data': data, + 'graph_sync': graph_sync, + }) + + _write_result(result) + + +def _collect_graph_entry(dag: 'DAGType', record_uid: str, params: 'KeeperParams', + config_uid: str) -> dict: + """Build the per-graph entry for record_uid. + + Returns a dict with zero or more of: + "vertex_active": bool — record_uid exists as a vertex in this graph + "edges": [...] — active, non-deleted edges referencing record_uid + + Returns an empty dict when the record has no presence in the graph at all, + signalling the caller to omit this graph from the output. + """ + entry: dict = {} + + # Check whether record_uid is itself a vertex in this graph (including lone vertices). + vertex = dag.get_vertex(record_uid) + if vertex is not None: + entry['vertex_active'] = vertex.active + + edges = _collect_edges_for_record(dag, record_uid, params, config_uid) + if edges: + entry['edges'] = edges + + return entry + + +def _collect_edges_for_record(dag: 'DAGType', record_uid: str, params: 'KeeperParams', + config_uid: str) -> List[dict]: + """Return all non-deleted edges that reference record_uid as head or tail. + + Inactive edges (active=False) are included — they may represent settings + that exist in the graph but have been superseded or are pending deletion. + The 'active' field in each output dict lets the caller distinguish them. + DELETION and UNDENIAL edges are still excluded (bookkeeping, not data). 
+ """ + edges_out = [] + for vertex in dag.all_vertices: + tail_uid = vertex.uid + for edge in (vertex.edges or []): + if not edge: + continue + if edge.edge_type in _EXCLUDE_EDGE_TYPES: + continue + head_uid = edge.head_uid + if tail_uid != record_uid and head_uid != record_uid: + continue + + contents = _extract_edge_contents(edge, tail_uid, params, config_uid) + + # ACL edges may carry a rotation_settings.pwd_complexity field that is + # AES-GCM encrypted with the owning record's key and base64-encoded. + # Decrypt it in-place so callers see the plaintext complexity rules. + if edge.edge_type == EdgeType.ACL and isinstance(contents, dict): + rotation_settings = contents.get('rotation_settings') + if isinstance(rotation_settings, dict): + pwd_complexity_enc = rotation_settings.get('pwd_complexity') + if pwd_complexity_enc and isinstance(pwd_complexity_enc, str): + for uid in (head_uid, tail_uid): + raw_rec = params.record_cache.get(uid) or {} + rec_key = raw_rec.get('record_key_unencrypted') + if not rec_key: + continue + try: + enc_bytes = base64.b64decode(pwd_complexity_enc) + rotation_settings['pwd_complexity'] = json.loads( + decrypt_aes(enc_bytes, rec_key).decode('utf-8') + ) + break + except Exception: + pass + + edge_type_str = edge.edge_type.value if hasattr(edge.edge_type, 'value') else str(edge.edge_type) + edges_out.append({ + 'head': head_uid, + 'tail': tail_uid, + 'edge_type': edge_type_str, + 'path': edge.path, + 'active': edge.active, + 'contents': contents, + }) + return edges_out + + +def _extract_edge_contents(edge, tail_uid: str, params: 'KeeperParams', config_uid: str): + """Attempt to return edge content as a serialisable value. + + For most edges the DAG's built-in decryption (decrypt=True default) is + sufficient and content_as_dict works straight away. + + DATA edges encrypted directly with the vertex owner's record key + (jit_settings, ai_settings pattern) are not covered by the normal + vertex-keychain flow. 
get_resource_settings() handles these correctly: + it loads the graph keyed on the resource record's own key and also + handles base64-encoded encrypted content. It is only called when the + fast content_as_dict path has already failed, to avoid unnecessary + network round trips. + + config_uid is the PAM configuration that owns the DAG being traversed — + passed from the caller so records not in the rotation cache are still + handled correctly. + """ + if edge.content is None: + return None + + # Happy path: DAG already decrypted it. + try: + return edge.content_as_dict + except Exception: + pass + + # Fallback for DATA edges whose content the DAG keychain could not decrypt + # (e.g. jit_settings / ai_settings encrypted with the resource's own record key). + if edge.edge_type == EdgeType.DATA and edge.path and config_uid: + try: + result = get_resource_settings(params, tail_uid, edge.path, config_uid) + if result is not None: + return result + except Exception: + pass + + # Last resort: return as plain string (non-JSON content, e.g. a path label). + # content_as_str can silently return bytes when .decode() fails, so check the type. + try: + s = edge.content_as_str + if isinstance(s, str): + return s + except Exception: + pass + + # All decode/decrypt attempts failed but content exists — return the first + # 40 bytes as hex so the caller can tell there IS data vs truly absent. 
+    raw = edge.content
+    if isinstance(raw, (bytes, str)):
+        raw_bytes = raw if isinstance(raw, bytes) else raw.encode('latin-1', errors='replace')
+        snippet = raw_bytes[:40].hex()
+        truncated = len(raw_bytes) > 40
+        return f'<binary content, {len(raw_bytes)} bytes, hex: {snippet}{"..." if truncated else ""}>'
+    return None

From d5151e6f3e4edc0d0f0f21f483874ec06d0d9e9b Mon Sep 17 00:00:00 2001
From: Sergey Kolupaev
Date: Thu, 5 Mar 2026 11:25:35 -0800
Subject: [PATCH 03/15] Password score calculation hangs if password is too
 long

---
 keepercommander/utils.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/keepercommander/utils.py b/keepercommander/utils.py
index a28cb5b99..384ab4926 100644
--- a/keepercommander/utils.py
+++ b/keepercommander/utils.py
@@ -304,6 +304,10 @@ def password_score(password):    # type: (str) -> int
         return score
 
     total = len(password)
+    if total > 50:
+        # this password score implementation hangs if password is too long
+        password = password[:50]
+        total = 50
     uppers = 0
     lowers = 0
     digits = 0

From 7f8cf386840bb30b559a0e23f5140e704c41c464 Mon Sep 17 00:00:00 2001
From: Max Ustinov
Date: Wed, 4 Mar 2026 10:25:45 -0800
Subject: [PATCH 04/15] fix(record-add): populate field labels from record type
 schema

Records created via Commander were missing field label metadata because
the record-add command used an empty string as default when the schema
field had no explicit label override. This caused blank field names when
records were retrieved via KSM.

Use the field $ref type as the default label when no explicit label is
defined in the record type schema, matching the behavior of the web
vault which consistently populates labels for all schema-defined fields.
Affected paths: - RecordAddCommand.execute() in commands/record_edit.py - prepare_record_add_or_update() in importer/imp_exp.py Refs: KC-1163 --- keepercommander/commands/record_edit.py | 2 +- keepercommander/importer/imp_exp.py | 2 +- tests/test_kc1163_record_field_labels.py | 148 +++++++++++++++++++++++ 3 files changed, 150 insertions(+), 2 deletions(-) create mode 100644 tests/test_kc1163_record_field_labels.py diff --git a/keepercommander/commands/record_edit.py b/keepercommander/commands/record_edit.py index f8fa052d1..d46122028 100644 --- a/keepercommander/commands/record_edit.py +++ b/keepercommander/commands/record_edit.py @@ -856,7 +856,7 @@ def execute(self, params, **kwargs): ref = rf.get('$ref') if not ref: continue - label = rf.get('label', '') + label = rf.get('label') or ref required = rf.get('required', False) default_value = None if ref == 'appFiller': diff --git a/keepercommander/importer/imp_exp.py b/keepercommander/importer/imp_exp.py index 6982ece6f..13b5bbe96 100644 --- a/keepercommander/importer/imp_exp.py +++ b/keepercommander/importer/imp_exp.py @@ -2160,7 +2160,7 @@ def prepare_record_add_or_update(update_flag, params, records): if '$ref' in field: f = RecordSchemaField() f.ref = field['$ref'] - f.label = field.get('label') or '' + f.label = field.get('label') or field['$ref'] if 'required' in field: if field['required']: f.required = True diff --git a/tests/test_kc1163_record_field_labels.py b/tests/test_kc1163_record_field_labels.py new file mode 100644 index 000000000..0b30107d2 --- /dev/null +++ b/tests/test_kc1163_record_field_labels.py @@ -0,0 +1,148 @@ +""" +Unit tests for KC-1163: Commander-created records lose field labels in KSM. + +Verifies that record-add and the importer populate field labels from the +record type schema when no explicit label override is defined, matching +web vault behavior. 
+""" +import json +import unittest +from unittest import mock + +from keepercommander import vault +from keepercommander.commands.record_edit import RecordAddCommand, RecordEditMixin +from keepercommander.importer.imp_exp import prepare_record_add_or_update + + +# Minimal schema for a 'login' record type — mirrors what Keeper returns +# for standard types: $ref fields with no explicit label override. +LOGIN_SCHEMA_FIELDS = [ + {'$ref': 'login'}, + {'$ref': 'password'}, + {'$ref': 'url'}, + {'$ref': 'fileRef'}, + {'$ref': 'oneTimeCode'}, +] + +# A schema entry WITH an explicit label override (e.g. bankCard cardholderName) +BANKCARD_SCHEMA_FIELDS = [ + {'$ref': 'paymentCard'}, + {'$ref': 'text', 'label': 'cardholderName'}, + {'$ref': 'pinCode'}, + {'$ref': 'addressRef'}, + {'$ref': 'fileRef'}, +] + + +def _mock_record_type_fields(schema_fields): + """Return a JSON string as get_record_type_fields would.""" + content = json.dumps({'fields': schema_fields}) + return json.dumps([{'content': content}]) + + +class TestRecordAddFieldLabels(unittest.TestCase): + """record-add: fields get labels from schema $ref when no explicit label.""" + + def _build_record(self, schema_fields, field_args): + """Helper: run RecordAddCommand field-scaffolding logic directly.""" + cmd = RecordAddCommand() + record = vault.TypedRecord() + record.type_name = 'login' + + for rf in schema_fields: + ref = rf.get('$ref') + if not ref: + continue + label = rf.get('label') or ref # ← the fix + field = vault.TypedField.new_field(ref, None, label) + record.fields.append(field) + + return record + + def test_standard_fields_use_ref_as_label(self): + """Standard login fields should have label == $ref type.""" + record = self._build_record(LOGIN_SCHEMA_FIELDS, []) + labels = {f.type: f.label for f in record.fields} + + self.assertEqual(labels['login'], 'login', + "login field label must not be blank") + self.assertEqual(labels['password'], 'password', + "password field label must not be blank") + 
self.assertEqual(labels['url'], 'url', + "url field label must not be blank") + + def test_explicit_label_override_preserved(self): + """Explicit label overrides in the schema must be kept as-is.""" + record = self._build_record(BANKCARD_SCHEMA_FIELDS, []) + labels = {f.type: f.label for f in record.fields} + + self.assertEqual(labels['text'], 'cardholderName', + "explicit schema label override must be preserved") + self.assertEqual(labels['paymentCard'], 'paymentCard', + "field without override still gets ref as label") + + def test_no_blank_labels(self): + """No field created by record-add should have a blank label.""" + record = self._build_record(LOGIN_SCHEMA_FIELDS, []) + for field in record.fields: + self.assertTrue(field.label, + f"Field type '{field.type}' has a blank label — KC-1163") + + +class TestImporterFieldLabels(unittest.TestCase): + """Importer path: schema fields get labels from $ref when no explicit label.""" + + def _build_schema_fields(self, schema_fields): + """Simulate the schema-building loop in prepare_record_add_or_update.""" + from keepercommander.importer.importer import RecordSchemaField + result = [] + for field in schema_fields: + if '$ref' in field: + f = RecordSchemaField() + f.ref = field['$ref'] + f.label = field.get('label') or field['$ref'] # ← the fix + result.append(f) + return result + + def test_standard_fields_use_ref_as_label(self): + schema = self._build_schema_fields(LOGIN_SCHEMA_FIELDS) + by_ref = {f.ref: f.label for f in schema} + + self.assertEqual(by_ref['login'], 'login') + self.assertEqual(by_ref['password'], 'password') + self.assertEqual(by_ref['url'], 'url') + + def test_explicit_label_override_preserved(self): + schema = self._build_schema_fields(BANKCARD_SCHEMA_FIELDS) + by_ref = {f.ref: f.label for f in schema} + + self.assertEqual(by_ref['text'], 'cardholderName') + self.assertEqual(by_ref['paymentCard'], 'paymentCard') + + def test_no_blank_labels(self): + schema = 
self._build_schema_fields(LOGIN_SCHEMA_FIELDS) + for f in schema: + self.assertTrue(f.label, + f"Schema field ref='{f.ref}' has blank label — KC-1163") + + +class TestOldBehaviorWouldFail(unittest.TestCase): + """Regression guard: demonstrate what the OLD code produced (should fail now).""" + + def test_old_code_produced_blank_labels(self): + """Confirm the old rf.get('label', '') pattern causes blank labels.""" + fields = [] + for rf in LOGIN_SCHEMA_FIELDS: + ref = rf.get('$ref') + label_old = rf.get('label', '') # OLD behavior + f = vault.TypedField.new_field(ref, None, label_old) + fields.append(f) + + blank = [f.type for f in fields if not f.label] + # With the old code all standard fields would have blank labels + self.assertTrue(len(blank) > 0, + "Expected old code to produce blank labels (regression check)") + + +if __name__ == '__main__': + unittest.main() From 0b312ff730b02f1dc0e269367c6640089814c9a7 Mon Sep 17 00:00:00 2001 From: Sergey Kolupaev Date: Thu, 5 Mar 2026 16:00:20 -0800 Subject: [PATCH 05/15] Support CommandLine approval configuration --- keepercommander/pedm/admin_plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keepercommander/pedm/admin_plugin.py b/keepercommander/pedm/admin_plugin.py index 682afb743..c9a617622 100644 --- a/keepercommander/pedm/admin_plugin.py +++ b/keepercommander/pedm/admin_plugin.py @@ -311,7 +311,7 @@ def get_collections() -> Iterable[admin_storage.PedmStorageCollection]: collections: List[admin_types.PedmCollection] = [] for collection_dto in get_collections(): try: - if collection_dto.collection_type in (1000, 1001, 1002): + if 1000 <= collection_dto.collection_type < 2000: collection_value = collection_dto.data.decode('utf-8') else: collection_value = crypto.decrypt_aes_v2(collection_dto.data, self.agent_key).decode('utf-8') From 39508c99589e0b7202d15becde3cc1373137b6f8 Mon Sep 17 00:00:00 2001 From: John Walstra Date: Fri, 6 Mar 2026 08:58:43 -0600 Subject: [PATCH 06/15] Make sure user's 
ACL attributes are correct for a SaaS user. --- keepercommander/commands/pam_debug/info.py | 15 +++++++- keepercommander/commands/pam_saas/set.py | 45 +++++----------------- 2 files changed, 23 insertions(+), 37 deletions(-) diff --git a/keepercommander/commands/pam_debug/info.py b/keepercommander/commands/pam_debug/info.py index 11d1e8cac..09d9bcfa6 100644 --- a/keepercommander/commands/pam_debug/info.py +++ b/keepercommander/commands/pam_debug/info.py @@ -203,7 +203,20 @@ def _print_field(f): f"{acl_content.rotation_settings.get_pwd_complexity(key_bytes)}") print(f" . Disabled = {acl_content.rotation_settings.disabled}") print(f" . NOOP = {acl_content.rotation_settings.noop}") - print(f" . SaaS Config Records = {acl_content.rotation_settings.saas_record_uid_list}") + print(f" . SaaS configuration record UID = " + f"{acl_content.rotation_settings.saas_record_uid_list}") + + if len(acl_content.rotation_settings.saas_record_uid_list) > 0: + if acl_content.rotation_settings.noop: + saas_config_uid = acl_content.rotation_settings.saas_record_uid_list[0] + saas_config = vault.KeeperRecord.load( + params, + saas_config_uid) # type: Optional[TypedRecord] + + print(f" . SaaS configuration record is {saas_config.title}") + else: + print(f"{bcolors.FAIL} . Has SaaS plugin config record, " + f"however it's not NOOP{bcolors.ENDC}") elif record.record_type == PAM_USER: print(f"{bcolors.FAIL} * PAM User has NO acl!!!!!!{bcolors.ENDC}") diff --git a/keepercommander/commands/pam_saas/set.py b/keepercommander/commands/pam_saas/set.py index 5c428cd3b..9d55ae63a 100644 --- a/keepercommander/commands/pam_saas/set.py +++ b/keepercommander/commands/pam_saas/set.py @@ -1,12 +1,10 @@ from __future__ import annotations import argparse -import logging - from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext from ... import vault from . 
import get_plugins_map from ...discovery_common.record_link import RecordLink -from ...discovery_common.constants import PAM_USER, PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY +from ...discovery_common.constants import PAM_USER from ...discovery_common.types import UserAclRotationSettings from typing import Optional, TYPE_CHECKING @@ -22,8 +20,6 @@ class PAMActionSaasSetCommand(PAMGatewayActionDiscoverCommandBase): help='The UID of the User record') parser.add_argument('--config-record-uid', '-c', required=True, dest='config_record_uid', action='store', help='The UID of the record that has SaaS configuration') - parser.add_argument('--resource-uid', '-r', required=False, dest='resource_uid', action='store', - help='The UID of the Resource record, if needed.') def get_parser(self): return PAMActionSaasSetCommand.parser @@ -114,26 +110,8 @@ def execute(self, params: KeeperParams, **kwargs): f'{", ".join(missing_fields)}')) return - parent_uid = gateway_context.configuration_uid - - # Not sure if SaaS type rotation should be limited to NOOP rotation. - # Allow a resource record to be used. - if resource_uid is not None: - # Check to see if the record exists. - resource_record = vault.KeeperRecord.load(params, resource_uid) # type: Optional[TypedRecord] - if resource_record is None: - print(self._f("The resource record does not exists.")) - return - - # Make sure this user is a PAM User. 
- if user_record.record_type in [PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY]: - print(self._f("The resource record does not have the correct record type.")) - return - - parent_uid = resource_uid - record_link = RecordLink(record=gateway_context.configuration, params=params, fail_on_corrupt=False) - acl = record_link.get_acl(user_uid, parent_uid) + acl = record_link.get_acl(user_uid, gateway_context.configuration_uid) if acl is None: if resource_uid is not None: print(self._f("There is no relationship between the user and the resource record.")) @@ -144,25 +122,20 @@ def execute(self, params: KeeperParams, **kwargs): if acl.rotation_settings is None: acl.rotation_settings = UserAclRotationSettings() - if resource_uid is not None and acl.rotation_settings.noop is True: - print(self._f("The rotation is flagged as No Operation, however you passed in a resource record. " - "This combination is not allowed.")) - return - - # If there is a resource record, it not NOOP. - # If there is NO resource record, it is NOOP. - # However, if this is an IAM User, don't set the NOOP - if acl.is_iam_user is False: - acl.rotation_settings.noop = resource_uid is None - # Make sure we are not re-adding the same SaaS config. if config_record_uid in acl.rotation_settings.saas_record_uid_list: print(self._f("The SaaS configuration record is already being used for this user.")) return + # SaaS users are like cloud users, but with noop set to True. + # The frontend logic is if noop = True and saas_record_uid_list has an item; it's a SaaS Rotation. + # Also make sure other attributes don't exist. 
+ acl.rotation_settings.noop = True + acl.is_iam_user = False + acl.is_admin = False acl.rotation_settings.saas_record_uid_list = [config_record_uid] - record_link.belongs_to(user_uid, parent_uid, acl=acl) + record_link.belongs_to(user_uid, gateway_context.configuration_uid, acl=acl) record_link.save() print(self._gr(f"Setting {plugin_name} rotation for the user record.")) From 1cf86400b5c00f553ced4157fd9f4858a7856fff Mon Sep 17 00:00:00 2001 From: idimov-keeper <78815270+idimov-keeper@users.noreply.github.com> Date: Fri, 6 Mar 2026 09:24:02 -0600 Subject: [PATCH 07/15] lookup by field type when filed type label is missing (#1851) --- keepercommander/commands/record_edit.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/keepercommander/commands/record_edit.py b/keepercommander/commands/record_edit.py index d46122028..87676deff 100644 --- a/keepercommander/commands/record_edit.py +++ b/keepercommander/commands/record_edit.py @@ -574,6 +574,11 @@ def assign_typed_fields(self, record, fields): (x for x in record.fields if (not parsed_field.type or x.type == parsed_field.type) and (ignore_label or (x.label or '').lower() == f_label)), None) + # When label is omitted (e.g. 
"url=value") and there is a single field of this type, use it + if not record_field and not f_label and field_type and rf and rf.multiple != record_types.Multiple.Always: + candidates = [x for x in record.fields if x.type == field_type] + if len(candidates) == 1: + record_field = candidates[0] if record_field: is_field = True else: From aa8a735c41780a971e7dd5bc751dfe86a3a511ec Mon Sep 17 00:00:00 2001 From: idimov-keeper <78815270+idimov-keeper@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:19:58 -0600 Subject: [PATCH 08/15] Added terminal size tracking and sending resize events to guacd (#1847) --- keepercommander/commands/pam_debug/dump.py | 20 +- keepercommander/commands/pam_launch/launch.py | 70 ++++ .../pam_launch/terminal_connection.py | 53 ++- .../commands/pam_launch/terminal_size.py | 302 ++++++++++++++++++ 4 files changed, 405 insertions(+), 40 deletions(-) create mode 100644 keepercommander/commands/pam_launch/terminal_size.py diff --git a/keepercommander/commands/pam_debug/dump.py b/keepercommander/commands/pam_debug/dump.py index 424d1b7b7..aabe2814f 100644 --- a/keepercommander/commands/pam_debug/dump.py +++ b/keepercommander/commands/pam_debug/dump.py @@ -118,7 +118,7 @@ def _on_folder(f): valid_uids.append(rec_uid) - # v6 PAM Configuration records ARE their own graph root — no rotation-cache entry exists for them. + # v6 PAM Configuration records ARE their own graph root - no rotation-cache entry exists for them. if version == 6: config_to_records.setdefault(rec_uid, []).append(rec_uid) record_config_map[rec_uid] = rec_uid @@ -187,7 +187,7 @@ def _on_folder(f): 'revision': revision, } - # data — same structure as `get --format=json` + # data - same structure as `get --format=json` data = {} try: r = api.get_record(params, rec_uid) @@ -199,12 +199,12 @@ def _on_folder(f): except Exception as err: logging.warning('Could not build data for record %s: %s', rec_uid, err) - # graph_sync — dict keyed by config_uid, then by graph name. 
+ # graph_sync - dict keyed by config_uid, then by graph name. # A record may be referenced by more than one PAM Configuration; we query # every already-loaded DAG so cross-config references are captured. # Inner value may contain: - # "vertex_active": bool — present when the record UID is a vertex in that graph - # "edges": [...] — present only when there are active, non-deleted edges + # "vertex_active": bool - present when the record UID is a vertex in that graph + # "edges": [...] - present only when there are active, non-deleted edges # Config/graph keys are omitted when the record has no presence there. graph_sync: Dict[str, Dict[str, dict]] = {} for (c_uid, graph_id), dag in dag_cache.items(): @@ -234,8 +234,8 @@ def _collect_graph_entry(dag: 'DAGType', record_uid: str, params: 'KeeperParams' """Build the per-graph entry for record_uid. Returns a dict with zero or more of: - "vertex_active": bool — record_uid exists as a vertex in this graph - "edges": [...] — active, non-deleted edges referencing record_uid + "vertex_active": bool - record_uid exists as a vertex in this graph + "edges": [...] - active, non-deleted edges referencing record_uid Returns an empty dict when the record has no presence in the graph at all, signalling the caller to omit this graph from the output. @@ -258,7 +258,7 @@ def _collect_edges_for_record(dag: 'DAGType', record_uid: str, params: 'KeeperPa config_uid: str) -> List[dict]: """Return all non-deleted edges that reference record_uid as head or tail. - Inactive edges (active=False) are included — they may represent settings + Inactive edges (active=False) are included - they may represent settings that exist in the graph but have been superseded or are pending deletion. The 'active' field in each output dict lets the caller distinguish them. DELETION and UNDENIAL edges are still excluded (bookkeeping, not data). 
@@ -325,7 +325,7 @@ def _extract_edge_contents(edge, tail_uid: str, params: 'KeeperParams', config_u fast content_as_dict path has already failed, to avoid unnecessary network round trips. - config_uid is the PAM configuration that owns the DAG being traversed — + config_uid is the PAM configuration that owns the DAG being traversed - passed from the caller so records not in the rotation cache are still handled correctly. """ @@ -357,7 +357,7 @@ def _extract_edge_contents(edge, tail_uid: str, params: 'KeeperParams', config_u except Exception: pass - # All decode/decrypt attempts failed but content exists — return the first + # All decode/decrypt attempts failed but content exists - return the first # 40 bytes as hex so the caller can tell there IS data vs truly absent. raw = edge.content if isinstance(raw, (bytes, str)): diff --git a/keepercommander/commands/pam_launch/launch.py b/keepercommander/commands/pam_launch/launch.py index 2a91dc3cd..3646be303 100644 --- a/keepercommander/commands/pam_launch/launch.py +++ b/keepercommander/commands/pam_launch/launch.py @@ -22,6 +22,7 @@ from keeper_secrets_manager_core.utils import url_safe_str_to_bytes from .terminal_connection import launch_terminal_connection +from .terminal_size import get_terminal_size_pixels, is_interactive_tty from .guac_cli.stdin_handler import StdinHandler from ..base import Command from ..tunnel.port_forward.tunnel_helpers import ( @@ -573,6 +574,32 @@ def signal_handler_fn(signum, frame): stdin_handler.start() logging.debug("STDIN handler started") # (pipe/blob/end mode) + # --- Terminal resize tracking --- + # Resize polling is skipped entirely in non-interactive (piped) + # environments where get_terminal_size() returns a dummy value. + _resize_enabled = is_interactive_tty() + # Poll cols/rows cheaply every N iterations; a timestamp guard + # ensures correctness if the loop sleep interval ever changes. 
+ _RESIZE_POLL_EVERY = 3 # iterations (~0.3 s at 0.1 s/iter) + _RESIZE_POLL_INTERVAL = 0.3 # seconds - authoritative threshold + _RESIZE_DEBOUNCE = 0.25 # seconds - max send rate during drag + _resize_poll_counter = 0 + _last_resize_poll_time = 0.0 + _last_resize_send_time = 0.0 + # Track the last *sent* size; only updated when we actually send. + # This keeps re-detecting the change each poll during rapid resize + # so the final resting size is always dispatched. + _last_sent_cols = 0 + _last_sent_rows = 0 + if _resize_enabled: + try: + _init_ts = shutil.get_terminal_size() + _last_sent_cols = _init_ts.columns + _last_sent_rows = _init_ts.lines + except Exception: + _resize_enabled = False + logging.debug("Could not query initial terminal size - resize polling disabled") + elapsed = 0 while not shutdown_requested and python_handler.running: # Check if tube/connection is closed @@ -588,6 +615,49 @@ def signal_handler_fn(signum, frame): time.sleep(0.1) elapsed += 0.1 + # --- Resize polling (Phase 1: cheap cols/rows check) --- + # Check every _RESIZE_POLL_EVERY iterations AND at least + # _RESIZE_POLL_INTERVAL seconds since the last poll, so the + # check stays correct if the loop sleep ever changes. + if _resize_enabled: + _resize_poll_counter += 1 + _now = time.time() + if ( + _resize_poll_counter % _RESIZE_POLL_EVERY == 0 + and _now - _last_resize_poll_time >= _RESIZE_POLL_INTERVAL + ): + _last_resize_poll_time = _now + try: + _cur_ts = shutil.get_terminal_size() + _cur_cols = _cur_ts.columns + _cur_rows = _cur_ts.lines + except Exception: + _cur_cols, _cur_rows = _last_sent_cols, _last_sent_rows + + if (_cur_cols, _cur_rows) != (_last_sent_cols, _last_sent_rows): + # Phase 2: size changed - apply debounce then + # fetch exact pixels and send. 
+ if _now - _last_resize_send_time >= _RESIZE_DEBOUNCE: + try: + _si = get_terminal_size_pixels(_cur_cols, _cur_rows) + python_handler.send_size( + _si['pixel_width'], + _si['pixel_height'], + _si['dpi'], + ) + _last_sent_cols = _cur_cols + _last_sent_rows = _cur_rows + _last_resize_send_time = _now + logging.debug( + f"Terminal resized: {_cur_cols}x{_cur_rows} cols/rows " + f"-> {_si['pixel_width']}x{_si['pixel_height']}px " + f"@ {_si['dpi']}dpi" + ) + except Exception as _e: + logging.debug(f"Failed to send resize: {_e}") + # else: debounce active - _last_sent_cols/rows unchanged + # so the change is re-detected on the next eligible poll. + # Status indicator every 30 seconds if elapsed % 30.0 < 0.1 and elapsed > 0.1: rx = python_handler.messages_received diff --git a/keepercommander/commands/pam_launch/terminal_connection.py b/keepercommander/commands/pam_launch/terminal_connection.py index 90c896f00..5e182b612 100644 --- a/keepercommander/commands/pam_launch/terminal_connection.py +++ b/keepercommander/commands/pam_launch/terminal_connection.py @@ -23,7 +23,6 @@ import base64 import json import secrets -import shutil import time import uuid from typing import TYPE_CHECKING, Optional, Dict, Any @@ -101,29 +100,23 @@ class ProtocolType: ProtocolType.SQLSERVER: 1433, } -# Default terminal metrics used to translate local console dimensions into the -# pixel-based values that Guacamole expects. 
-DEFAULT_TERMINAL_COLUMNS = 80 -DEFAULT_TERMINAL_ROWS = 24 -DEFAULT_CELL_WIDTH_PX = 10 -DEFAULT_CELL_HEIGHT_PX = 19 -DEFAULT_SCREEN_DPI = 96 - - -def _build_screen_info(columns: int, rows: int) -> Dict[str, int]: - """Convert character columns/rows into pixel measurements for the Gateway.""" - col_value = columns if isinstance(columns, int) and columns > 0 else DEFAULT_TERMINAL_COLUMNS - row_value = rows if isinstance(rows, int) and rows > 0 else DEFAULT_TERMINAL_ROWS - return { - "columns": col_value, - "rows": row_value, - "pixel_width": col_value * DEFAULT_CELL_WIDTH_PX, - "pixel_height": row_value * DEFAULT_CELL_HEIGHT_PX, - "dpi": DEFAULT_SCREEN_DPI, - } - +from .terminal_size import ( + DEFAULT_TERMINAL_COLUMNS, + DEFAULT_TERMINAL_ROWS, + DEFAULT_CELL_WIDTH_PX, + DEFAULT_CELL_HEIGHT_PX, + DEFAULT_SCREEN_DPI, + _build_screen_info, + get_terminal_size_pixels, +) -DEFAULT_SCREEN_INFO = _build_screen_info(DEFAULT_TERMINAL_COLUMNS, DEFAULT_TERMINAL_ROWS) +# Computed at import time using the best available platform APIs so the initial +# offer payload carries accurate pixel dimensions even before the connection +# loop runs. Falls back to fixed cell-size constants if the query fails. +try: + DEFAULT_SCREEN_INFO = get_terminal_size_pixels() +except Exception: + DEFAULT_SCREEN_INFO = _build_screen_info(DEFAULT_TERMINAL_COLUMNS, DEFAULT_TERMINAL_ROWS) MAX_MESSAGE_SIZE_LINE = "a=max-message-size:1073741823" @@ -1213,16 +1206,16 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, # Prepare the offer data with terminal-specific parameters # Match webvault format: host, size, audio, video, image (for guacd configuration) # These parameters are needed by Gateway to configure guacd BEFORE OpenConnection - raw_columns = DEFAULT_TERMINAL_COLUMNS - raw_rows = DEFAULT_TERMINAL_ROWS - # Get terminal size for Guacamole size parameter + # Get terminal size for Guacamole size parameter (offer payload). 
+ # get_terminal_size_pixels() queries the terminal internally and uses + # platform-specific APIs (Windows: GetCurrentConsoleFontEx; Unix: + # TIOCGWINSZ) to obtain exact pixel dimensions before falling back to + # the fixed cell-size estimate. try: - terminal_size = shutil.get_terminal_size(fallback=(DEFAULT_TERMINAL_COLUMNS, DEFAULT_TERMINAL_ROWS)) - raw_columns = terminal_size.columns - raw_rows = terminal_size.lines + screen_info = get_terminal_size_pixels() except Exception: logging.debug("Falling back to default terminal size for offer payload") - screen_info = _build_screen_info(raw_columns, raw_rows) + screen_info = _build_screen_info(DEFAULT_TERMINAL_COLUMNS, DEFAULT_TERMINAL_ROWS) logging.debug( f"Using terminal metrics columns={screen_info['columns']} rows={screen_info['rows']} -> " f"{screen_info['pixel_width']}x{screen_info['pixel_height']}px @ {screen_info['dpi']}dpi" diff --git a/keepercommander/commands/pam_launch/terminal_size.py b/keepercommander/commands/pam_launch/terminal_size.py new file mode 100644 index 000000000..5f304391e --- /dev/null +++ b/keepercommander/commands/pam_launch/terminal_size.py @@ -0,0 +1,302 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' Dict[str, int]: + """Convert character columns/rows into pixel measurements for the Gateway.""" + col_value = columns if isinstance(columns, int) and columns > 0 else DEFAULT_TERMINAL_COLUMNS + row_value = rows if isinstance(rows, int) and rows > 0 else DEFAULT_TERMINAL_ROWS + return { + "columns": col_value, + "rows": row_value, + "pixel_width": col_value * DEFAULT_CELL_WIDTH_PX, + "pixel_height": row_value * DEFAULT_CELL_HEIGHT_PX, + "dpi": DEFAULT_SCREEN_DPI, + } + + +# --------------------------------------------------------------------------- +# Module-level caches +# --------------------------------------------------------------------------- + +# DPI is cached for the lifetime of the process. 
Display DPI rarely changes +# during a session - it would only change if the user moves the console window +# to a different-DPI monitor, which is not worth the overhead of re-querying +# on every resize event. +_dpi: Optional[int] = None + +# TIOCGWINSZ pixel support: None = untested, True = returns non-zero pixels, +# False = permanently disabled (returned all-zero pixel fields). When False, +# _get_pixels_unix() returns (0, 0) immediately without retrying the ioctl. +_tiocgwinsz_works: Optional[bool] = None + +# Interactive TTY flag, cached after first call. +_is_tty: Optional[bool] = None + + +# --------------------------------------------------------------------------- +# TTY detection +# --------------------------------------------------------------------------- + +def is_interactive_tty() -> bool: + """Return True if both stdin and stdout are connected to a real TTY. + + Cached after the first call. When running in a non-interactive environment + (piped I/O, CI, scripted launch) resize polling should be skipped entirely + to avoid spurious or meaningless size-change events. + """ + global _is_tty + if _is_tty is None: + try: + _is_tty = sys.stdin.isatty() and sys.stdout.isatty() + except Exception: + _is_tty = False + return _is_tty + + +# --------------------------------------------------------------------------- +# Platform DPI helpers +# --------------------------------------------------------------------------- + +def _get_dpi_windows() -> int: + """Return display DPI on Windows via ctypes, cached for the session. + + Tries GetDpiForSystem (shcore.dll, Windows 8.1+) first, then falls back + to GetDeviceCaps(LOGPIXELSX). Returns DEFAULT_SCREEN_DPI (96) on failure. 
+ """ + global _dpi + if _dpi is not None: + return _dpi + try: + import ctypes + # GetDpiForSystem - available on Windows 8.1+ via shcore.dll + try: + dpi = ctypes.windll.shcore.GetDpiForSystem() + if dpi and dpi > 0: + _dpi = int(dpi) + return _dpi + except Exception: + pass + # Fallback: GDI GetDeviceCaps(LOGPIXELSX) + LOGPIXELSX = 88 + hdc = ctypes.windll.user32.GetDC(0) + if hdc: + try: + dpi = ctypes.windll.gdi32.GetDeviceCaps(hdc, LOGPIXELSX) + if dpi and dpi > 0: + _dpi = int(dpi) + return _dpi + finally: + ctypes.windll.user32.ReleaseDC(0, hdc) + except Exception as e: + logging.debug(f"Could not query Windows DPI: {e}") + _dpi = DEFAULT_SCREEN_DPI + return _dpi + + +def _get_dpi_unix() -> int: + """Return display DPI on Unix/macOS, cached for the session. + + There is no portable, connection-independent way to query DPI from a + terminal process on Unix without a display-server connection. Standard + Guacamole sessions use 96 DPI as the baseline, so we return that. + """ + global _dpi + if _dpi is None: + _dpi = DEFAULT_SCREEN_DPI + return _dpi + + +# --------------------------------------------------------------------------- +# Platform pixel-dimension helpers +# --------------------------------------------------------------------------- + +def _get_pixels_windows(columns: int, rows: int): + """Return (pixel_width, pixel_height) on Windows via GetCurrentConsoleFontEx. + + Retrieves the console font glyph size in pixels (dwFontSize.X / .Y) and + multiplies by columns/rows to get the total terminal window pixel size. + Returns (0, 0) on any failure so the caller can fall back gracefully. 
+ """ + try: + import ctypes + import ctypes.wintypes + + STD_OUTPUT_HANDLE = -11 + handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE) + if not handle or handle == ctypes.wintypes.HANDLE(-1).value: + return 0, 0 + + class COORD(ctypes.Structure): + _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)] + + class CONSOLE_FONT_INFOEX(ctypes.Structure): + _fields_ = [ + ('cbSize', ctypes.c_ulong), + ('nFont', ctypes.c_ulong), + ('dwFontSize', COORD), + ('FontFamily', ctypes.c_uint), + ('FontWeight', ctypes.c_uint), + ('FaceName', ctypes.c_wchar * 32), + ] + + font_info = CONSOLE_FONT_INFOEX() + font_info.cbSize = ctypes.sizeof(CONSOLE_FONT_INFOEX) + + if ctypes.windll.kernel32.GetCurrentConsoleFontEx(handle, False, ctypes.byref(font_info)): + fw = font_info.dwFontSize.X + fh = font_info.dwFontSize.Y + if fw > 0 and fh > 0: + return columns * fw, rows * fh + + return 0, 0 + except Exception as e: + logging.debug(f"GetCurrentConsoleFontEx failed: {e}") + return 0, 0 + + +def _get_pixels_unix(columns: int, rows: int): + """Return (pixel_width, pixel_height) on Unix/macOS via TIOCGWINSZ. + + The kernel struct winsize includes ws_xpixel and ws_ypixel holding the + total terminal pixel dimensions. If those fields are zero on the first + attempt, the failure is cached permanently (_tiocgwinsz_works = False) + and subsequent calls return (0, 0) without retrying the ioctl. + """ + global _tiocgwinsz_works + if _tiocgwinsz_works is False: + return 0, 0 + try: + import fcntl + import termios + + buf = struct.pack('HHHH', 0, 0, 0, 0) + result = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, buf) + # struct winsize layout: ws_row, ws_col, ws_xpixel, ws_ypixel + _ws_row, _ws_col, ws_xpixel, ws_ypixel = struct.unpack('HHHH', result) + if ws_xpixel > 0 and ws_ypixel > 0: + _tiocgwinsz_works = True + return ws_xpixel, ws_ypixel + # Pixel fields are zero - terminal emulator does not populate them. 
+ _tiocgwinsz_works = False + return 0, 0 + except Exception as e: + logging.debug(f"TIOCGWINSZ failed: {e}") + _tiocgwinsz_works = False + return 0, 0 + + +# --------------------------------------------------------------------------- +# Public API +# --------------------------------------------------------------------------- + +def get_terminal_size_pixels( + columns: Optional[int] = None, + rows: Optional[int] = None, +) -> Dict[str, int]: + """Return terminal size in pixels and DPI for a Guacamole 'size' instruction. + + Always re-queries the terminal size internally via shutil.get_terminal_size + for maximum accuracy. The optional *columns* and *rows* arguments serve as + a fallback used only when the internal query fails. + + Platform behaviour + ------------------ + Windows + Uses GetCurrentConsoleFontEx to obtain the console font glyph size in + pixels, then multiplies columns × rows for exact pixel dimensions. + DPI is obtained via GetDpiForSystem (or GetDeviceCaps as fallback). + Both are cached for the session. + + Unix / macOS + Tries TIOCGWINSZ ws_xpixel / ws_ypixel for pixel dimensions. If those + fields are zero (common - many terminal emulators do not fill them in), + the failure is cached permanently and the cell-size fallback is used on + every subsequent call without retrying the ioctl. + + Fallback + When platform-specific pixel APIs return (0, 0), falls back to + _build_screen_info(columns, rows) which uses DEFAULT_CELL_WIDTH_PX / + DEFAULT_CELL_HEIGHT_PX to estimate pixel dimensions from char cells. 
+ + Returns + ------- + dict with keys: columns, rows, pixel_width, pixel_height, dpi + (same structure as _build_screen_info - drop-in compatible) + """ + # Resolve caller-supplied hints as fallback values + fallback_cols = columns if (isinstance(columns, int) and columns > 0) else DEFAULT_TERMINAL_COLUMNS + fallback_rows = rows if (isinstance(rows, int) and rows > 0) else DEFAULT_TERMINAL_ROWS + + # Always re-query for maximum accuracy; use hints only if query fails + try: + ts = shutil.get_terminal_size(fallback=(fallback_cols, fallback_rows)) + actual_cols = ts.columns + actual_rows = ts.lines + except Exception: + actual_cols = fallback_cols + actual_rows = fallback_rows + + # Platform-specific pixel dimensions + if sys.platform == 'win32': + pixel_w, pixel_h = _get_pixels_windows(actual_cols, actual_rows) + dpi = _get_dpi_windows() + else: + pixel_w, pixel_h = _get_pixels_unix(actual_cols, actual_rows) + dpi = _get_dpi_unix() + + # Fallback: platform API returned (0, 0) - use fixed cell-size estimate + if pixel_w <= 0 or pixel_h <= 0: + return _build_screen_info(actual_cols, actual_rows) + + return { + "columns": actual_cols, + "rows": actual_rows, + "pixel_width": pixel_w, + "pixel_height": pixel_h, + "dpi": dpi, + } From 9fcc968c88eb57838074156d2fc74d9c8fe9e1c2 Mon Sep 17 00:00:00 2001 From: lthievenaz-keeper Date: Fri, 6 Mar 2026 18:03:30 +0000 Subject: [PATCH 09/15] Add script for exporting KCM resources into a PAM Project template (#1849) * Create kcm_export.py Add folder and script to convert KCM resources to PAM Project Extend template * Create KCM_mappings.json Add mapping dictionary of KCM parameters, to use in conjunction with the kcm_export.py script * Added comment about KCM_mappings * Fixed syntax for f strings with older python version Older versions of python don't support using the same quote characters on f strings - fixed * Updated naming scheme for resource --- examples/pam-kcm-import/KCM_mappings.json | 158 +++++++ 
examples/pam-kcm-import/kcm_export.py | 547 ++++++++++++++++++++++ 2 files changed, 705 insertions(+) create mode 100644 examples/pam-kcm-import/KCM_mappings.json create mode 100644 examples/pam-kcm-import/kcm_export.py diff --git a/examples/pam-kcm-import/KCM_mappings.json b/examples/pam-kcm-import/KCM_mappings.json new file mode 100644 index 000000000..d027b7271 --- /dev/null +++ b/examples/pam-kcm-import/KCM_mappings.json @@ -0,0 +1,158 @@ +{ + "users":{ + "username":"login", + "password":"password", + "private-key": "private_pem_key", + "public-key": "log", + "passphrase": "log", + "totp-algorithm": "totp-algorithm", + "totp-digits": "totp-digits", + "totp-period": "totp-period", + "totp-secret": "totp-secret" + }, + "resources":{ + "domain": "domain_name", + "create-recording-path": "pam_settings.options.graphical_session_recording=on", + "create-typescript-path": "pam_settings.options.text_session_recording=on", + "recording-include-keys": "pam_settings.connection.recording_include_keys", + "security": "pam_settings.connection.security", + "color-depth": null, + "enable-audio": null, + "disable-copy": "pam_settings.connection.disable_copy", + "disable-paste": "pam_settings.connection.disable_paste", + "force-lossless": null, + "read-only": null, + "backspace": null, + "url": "url", + "allow-url-manipulation": "pam_settings.connection.allow_url_manipulation", + "ignore-initial-ssl-cert": null, + "allowed-resource-url-patterns": "pam_settings.connection.allowed_resource_url_patterns", + "allowed-url-patterns": "pam_settings.connection.allowed_url_patterns", + "autofill-configuration": "pam_settings.connection.autofill_targets", + "disable-audio": "pam_settings.connection.disable_audio", + "audio-bps": null, + "audio-channels": null, + "audio-sample-rate": null, + "ca-cert": "pam_settings.connection.ca_certificate", + "client-cert": "pam_settings.connection.client_certificate", + "client-key": "pam_settings.connection.client_key", + "color-scheme": 
"pam_settings.connection.color_scheme", + "font-name": null, + "font-size": "pam_settings.connection.font_size", + "scrollback": null, + "ignore-cert": "pam_settings.connection.ignore_server_cert", + "namespace": "pam_settings.connection.namespace", + "pod": "pam_settings.connection.pod_name", + "container": "pam_settings.connection.container", + "use-ssl": "use_ssl", + "database": "pam_settings.connection.default_database", + "disable-csv-export": "pam_settings.connection.disable_csv_export", + "disable-csv-import": "pam_settings.connection.disable_csv_import", + "client-name": null, + "console": null, + "console-audio": null, + "disable-auth": "pam_settings.connection.disable_authentication", + "disable-bitmap-caching": null, + "disable-glyph-caching": null, + "disable-offscreen-caching": null, + "dpi": null, + "enable-audio-input": null, + "disable-display-resize": "pam_settings.connection.disable_dynamic_resizing", + "enable-desktop-composition": "pam_settings.connection.enableDesktopComposition", + "enable-font-smoothing": "pam_settings.connection.enableFontSmooting", + "enable-full-window-drag": "pam_settings.connection.enable_full_window_drag", + "enable-menu-animations": null, + "enable-printing": null, + "enable-theming": null, + "enable-touch": null, + "enable-wallpaper": "pam_settings.connection.enable_wallpaper", + "initial-program": null, + "load-balance-info": "pam_settings.connection.load_balance_info", + "normalize-clipboard": null, + "preconnection-blob": "pam_settings.connection.preconnection_blob", + "preconnection-id": "pam_settings.connection.preconnection_id", + "printer-name": null, + "remote-app": null, + "remote-app-args": null, + "remote-app-dir": null, + "resize-method": null, + "timezone": null, + "width": null, + "height": null, + "locale": null, + "host-key": "pam_settings.connection.public_host_key", + "command": "pam_settings.connection.command", + "server-alive-interval": null, + "terminal-type": null, + "login-failure-regex": 
"pam_settings.connection.login_failure_regex", + "login-success-regex": "pam_settings.connection.login_success_regex", + "password-regex": "pam_settings.connection.password_regex", + "username-regex": "pam_settings.connection.username_regex", + "audio-servername": null, + "clipboard-buffer-size": null, + "clipboard-encoding": null, + "compress-level": null, + "cursor": null, + "dest-host": null, + "dest-port": null, + "disable-server-input": null, + "encodings": null, + "quality-level": null, + "swap-red-blue": null, + "wol-broadcast-addr": null, + "wol-mac-addr": null, + "wol-send-packet": null, + "wol-udp-port": null, + "wol-wait-time": null, + "create-profile-directory": null, + "profile-storage-directory": null, + "exec-command": null, + "unix-socket": null, + "cert-fingerprints": null, + "cert-tofu": null, + "disable-download": null, + "disable-gfx": null, + "disable-upload": null, + "drive-name": null, + "drive-path": null, + "enable-drive": null, + "create-drive-path": null, + "gateway-domain": null, + "gateway-hostname": null, + "gateway-password": null, + "gateway-port": null, + "gateway-username": null, + "server-layout": null, + "static-channels": null, + "timeout": null, + "ca-certificate": null, + "disable-cert-hostname-verification": null, + "force-encryption": null, + "protocol-version": null, + "ksm-user-config-enabled": "ignore", + "recording-name": "ignore", + "recording-path": "ignore", + "recording-write-existing": "ignore", + "typescript-name": "ignore", + "typescript-path": "ignore", + "typescript-write-existing": "ignore", + "recording-exclude-mouse": null, + "recording-exclude-output": null, + "recording-exclude-touch": null, + "enable-sftp": "pam_settings.connection.sftp.enable_sftp", + "sftp-directory": "pam_settings.connection.sftp.sftp_upload_directory", + "sftp-disable-download": null, + "sftp-disable-upload": null, + "sftp-host-key": null, + "sftp-hostname": "pam_settings.connection.sftp.host", + "sftp-passphrase": null, + 
"sftp-password": "pam_settings.connection.sftp.password", + "sftp-port": "pam_settings.connection.sftp.port", + "sftp-private-key": "pam_settings.connection.sftp.private_key", + "sftp-public-key": null, + "sftp-root-directory": "pam_settings.connection.sftp.sftp_root_directory", + "sftp-server-alive-interval": "pam_settings.connection.sftp.sftp_keepalive_interval", + "sftp-timeout":null, + "sftp-username": "pam_settings.connection.sftp.login" + } +} diff --git a/examples/pam-kcm-import/kcm_export.py b/examples/pam-kcm-import/kcm_export.py new file mode 100644 index 000000000..b91eab42f --- /dev/null +++ b/examples/pam-kcm-import/kcm_export.py @@ -0,0 +1,547 @@ +#!/usr/bin/env python3 +""" +Connects to KCM Database (local/remote) and exports connections and connection groups. +Generates JSON file ready to be imported by pam project extend command. + +Must be run along with a dictionary of KCM parameters named KCM_mappings.json. + +Can handle the import of Connection Groups in three ways: +1 - Keeps the Connection Group nesting, except if the Group has a KSM configuration set, in which case it will mapped as a root gateway shared folder. 
+ ROOT/ + └ Connection group A (no config)/ + └ Connection group A1 (no config)/ + Connection group B (config)/ + └ Connection group B1 (no config)/ + +2 - Keeps the exact Connection Group nesting + ROOT/ + ├ Connection group A/ + │ └ Connection group A1/ + └ Connection group B/ + └ Connection group B1/ + +3 - Maps all Connection Groups as root gateway shared folder + ROOT/ + Connection group A/ + Connection group A1/ + Connection group B/ + Connection group B1/ +""" + +from json import dump,dumps,loads + +## RICH Console styling - can be removed if rich was not imported ## +from rich.console import Console +from rich.markdown import Markdown +## RICH Console styling ## + +DEBUG = False + +HOSTNAME = '127.0.0.1' + +DB_CONFIG = { + 'host': HOSTNAME, + 'user': 'guacamole_user', + 'password': 'password', + 'database': 'guacamole_db', + 'port': 3306 +} + +TOTP_ACCOUNT = 'kcm-totp%40keepersecurity.com' + +SQL = { + 'groups': """ +SELECT + cg.connection_group_id, + parent_id, + connection_group_name, + cga.attribute_value AS ksm_config +FROM + guacamole_connection_group cg +LEFT JOIN + guacamole_connection_group_attribute cga +ON + cg.connection_group_id = cga.connection_group_id + AND cga.attribute_name = 'ksm-config' +""", + 'connections': """ +SELECT + c.connection_id, + c.connection_name AS name, + c.protocol, + cp.parameter_name, + cp.parameter_value, + e.name AS entity_name, + e.type AS entity_type, + g.connection_group_id, + g.parent_id, + g.connection_group_name AS group_name, + ca.attribute_name, + ca.attribute_value +FROM + guacamole_connection c +LEFT JOIN + guacamole_connection_parameter cp ON c.connection_id = cp.connection_id +LEFT JOIN + guacamole_connection_attribute ca ON c.connection_id = ca.connection_id +LEFT JOIN + guacamole_connection_group g ON c.parent_id = g.connection_group_id +LEFT JOIN + guacamole_connection_permission p ON c.connection_id = p.connection_id +LEFT JOIN + guacamole_entity e ON p.entity_id = e.entity_id; +""" +} + +# Utils and 
CLI +USE_RICH = False + +try: + console = Console() + USE_RICH = True +except: + pass + +def display(text,style=None): + if USE_RICH: + console.print(Markdown(text),style=style) + else: + print(text) + + +def list_items(items,style='italic yellow'): + for item in items: + display(f'- {item}',style) + + +def handle_prompt(valid_inputs,prompt='Input: '): + response = input(prompt) + if response.lower() in valid_inputs: + return valid_inputs[response] + display('Invalid input') + return handle_prompt(valid_inputs,prompt=prompt) + + +def validate_file_upload(format,filename=None): + if not filename: + filename = input('File path: ') + try: + with open(filename,'r') as file: + if format=='csv': + from csv import DictReader + return list(DictReader(file)) + elif format=='json': + from json import load + return load(file) + elif format=='yaml': + from yaml import safe_load + return safe_load(file) + + except Exception as e: + display(f'Error: Exception {e} raised','bold red') + return validate_file_upload(format) + + +def debug(text,DEBUG): + if DEBUG: + print(f'>>DEBUG: {text}') + + +class KCM_export: + def __init__(self,DEBUG=DEBUG): + self.mappings = validate_file_upload('json','KCM_mappings.json') + self.debug = DEBUG + self.db_config = DB_CONFIG + self.folder_structure = 'ksm_based' + self.separator = '/' + self.dynamic_tokens = [] + self.logged_records = {} + + display('# KCM Import','bold yellow') + # Collect import method + display('What database are you running on KCM?', 'cyan') + list_items(['(1) MySQL','(2) PostgreSQL']) + self.database = handle_prompt({'1':'MYSQL','2':'POSTGRES'}) + + # Collect db credentials + self.collect_db_config() + + # Connect to db + connect = self.connect_to_db() + if not connect: + display('Unable to connect to database, ending program','bold red') + return + + # Generate template + json_template = self.generate_data() + + display('# Data collected and import-ready', 'green') + display('Exporting JSON template...') + with 
open('pam_import.json','w') as user_file: + dump(json_template,user_file,indent=2) + display('Exported pam_import.json successfully','italic green') + + return + + + def collect_db_config(self): + display('How do you wish to provide your database details?', 'cyan') + list_items([ + '(1) By docker-compose.yml file', + '(2) I have hardcoded them in the Python script' + ]) + if handle_prompt({'1':'file','2':'code'}) == 'file': + display('## Please upload your docker-compose file', 'cyan') + self.docker_compose = validate_file_upload('yaml') + + port={'MYSQL':3306,'POSTGRES':5432} + custom_port = None + + debug('Analysing services',self.debug) + guacamole_env = self.docker_compose['services']['guacamole']['environment'] + db_in_compose = True + host = "127.0.0.1" + if guacamole_env.get(f'{self.database}_HOSTNAME','db') != 'db': + debug('Alternate DB hostname detected',self.debug) + host = guacamole_env[f'{self.database}_HOSTNAME'] + db_in_compose=False + if db_in_compose and 'ports' in guacamole_env: + custom_port = int(self.docker_compose["services"][guacamole_env[f"{self.database}_HOSTNAME"]]["ports"][0].split(':')[0]) + try: + self.db_config = { + 'host': host, + 'user': guacamole_env[f'{self.database}_USERNAME'], + 'password': guacamole_env[f'{self.database}_PASSWORD'], + 'database': guacamole_env[f'{self.database}_DATABASE'], + 'port': custom_port or port[self.database] + } + except: + display('Unable to parse environment variables into suitable DB details. Please check that your docker-compose file has all relevant Guacamole variables, or hardcode them in the script','italic red') + self.collect_db_config() + + + def connect_to_db(self): + if self.database == 'MYSQL': + try: + from mysql.connector import connect + debug('Attempting connection to database',self.debug) + conn = connect(**self.db_config) + cursor = conn.cursor(dictionary=True) + + display('Database connection successful. 
Extracting data...','italic green') + + debug('Extracting connection group data',self.debug) + cursor.execute(SQL['groups']) + self.group_data = cursor.fetchall() + + debug('Extracting connection data',self.debug) + cursor.execute(SQL['connections']) + self.connection_data = cursor.fetchall() + + display('Done','italic green') + + return True + + except mysql.connector.Error as e: + display(f'MYSQL connector error: {e}','bold red') + return False + + elif self.database == 'POSTGRES': + try: + from psycopg2 import connect, OperationalError + from psycopg2.extras import RealDictCursor + debug('Attempting connection to database',self.debug) + conn = connect(**self.db_config) + cursor = conn.cursor(cursor_factory=RealDictCursor) + + display('Database connection successful. Extracting data...','italic green') + + debug('Extracting connection group data',self.debug) + cursor.execute(SQL['groups']) + group_rows = cursor.fetchall() + self.group_data = [dict(row) for row in group_rows] + + debug('Extracting connection data',self.debug) + cursor.execute(SQL['connections']) + connection_rows = cursor.fetchall() + self.connection_data = [dict(row) for row in connection_rows] + + display('Done','italic green') + + return True + except OperationalError as e: + display(f'POSTGRESQL connector error: {e}','bold red') + return False + + def generate_data(self): + display('What handling do you want to apply to Connection Groups?','cyan') + display('(1) Set Groups with KSM Config as Root Shared Folders (recommended)') + display('''The folder structure will largely follow that of KCM, however any Connection Group with a KSM Service Configuration will be created as a root shared folder: +ROOT/ +. └ Connection group A (no config)/ +. └ Connection group A1 (no config)/ +Connection group B (config)/ +. └ Connection group B1 (no config)/ + ''', 'yellow') + display('(2) Keep exact KCM nesting') + display('''The folder structure will replicate the exact same structure as KCM's: +ROOT/ +. 
├ Connection group A/ +. │ └ Connection group A1/ +. └ Connection group B/ +. └ Connection group B1/ + ''', 'yellow') + display('(3) Flat') + display('''All connection groups will be created as root shared folders: +ROOT/ +Connection group A/ +Connection group A1/ +Connection group B/ +Connection group B1/ + ''', 'yellow') + self.folder_structure = handle_prompt({'1':'ksm_based','2':'nested','3':'flat'}) + + self.group_paths = {} + + def resolve_path(group_id): + if group_id is None: + return "ROOT" + if group_id in self.group_paths: + return self.group_paths[group_id] + # Find the group details + group = next(g for g in self.group_data if g['connection_group_id'] == group_id) + if self.folder_structure == 'ksm_based' and group['ksm_config']: + self.group_paths[group_id] = group['connection_group_name'] + return group['connection_group_name'] + parent_path = resolve_path(group['parent_id']) + full_path = f"{parent_path}{self.separator}{group['connection_group_name']}" + self.group_paths[group_id] = full_path + return full_path + + # Resolve paths for all groups + for group in self.group_data: + if self.folder_structure=='flat': + self.group_paths[group['connection_group_id']] = group['connection_group_name'] + else: + resolve_path(group['connection_group_id']) + + self.connections = {} + self.users = {} + self.shared_folders = [] + print(self.group_paths) + + for connection in self.connection_data: + id = connection['connection_id'] + name = connection["name"] + debug(f'Importing Connection {name}',self.debug) + + # Resolving folder path + KCM_folder_path = self.group_paths.get(connection['connection_group_id'],'ROOT') + folder_array = KCM_folder_path.split(self.separator) + # Log Shared folder + if folder_array[0] not in self.shared_folders: + self.shared_folders.append(folder_array[0]) + + # Add users + if id not in self.users: + # Create bespoke user folders + folder_path = f'KCM Users - {folder_array[0]}' + if len(folder_array)>1: + folder_path += 
self.separator+self.separator.join(folder_array[1:]) + # Create user + user = { + 'folder_path': folder_path, + 'title': f'KCM User - {name}', + 'type': "pamUser", + 'rotation_settings':{} + } + self.users[id] = user + + # Add resources + if id not in self.connections: + # Create bespoke resource folders + folder_path = f'KCM Resources - {folder_array[0]}' + if len(folder_array)>1: + folder_path += self.separator+self.separator.join(folder_array[1:]) + + # Define record-type + types = { + 'http': 'pamRemoteBrowser', + 'mysql': 'pamDatabase', + 'postgres': 'pamDatabase', + 'sql-server': 'pamDatabase', + } + + resource = { + 'folder_path':folder_path, + 'title': f'KCM Resource - {name}', + 'type':types.get(connection['protocol'],'pamMachine'), + "host": "", + "pam_settings": { + "options": { + "rotation": "off", + "connections": "on", + "tunneling": "off", + "graphical_session_recording": "off" + }, + "connection": { + "protocol": connection['protocol'] if connection['protocol'] != "postgres" else "postgresql", + "launch_credentials": f'KCM User - {name}' + } + } + } + self.connections[id] = resource + + def handle_arg(id,name,arg,value): + def handle_mapping(mapping,value,dir): + if mapping == 'ignore': + debug(f'Mapping {arg} ignored',self.debug) + return dir + if mapping=='log': + if name not in self.logged_records: + debug(f'Adding record {name} to logged records',self.debug) + self.logged_records[name] = {'name':name, arg:value} + else: + self.logged_records[name][arg] = value + return dir + if mapping is None: + debug(f'Mapping {arg} recognized but not supported',self.debug) + return dir + if '=' in mapping: + value = mapping.split('=')[1] + mapping = mapping.split('=')[0] + if '.' 
in mapping: + param_array = mapping.split('.') + if len(param_array)>=2: + if param_array[0] not in dir[id]: + dir[id][param_array[0]] = {} + if len(param_array)==2: + dir[id][param_array[0]][param_array[1]] = value + if len(param_array)>=3: + if param_array[1] not in dir[id][param_array[0]]: + dir[id][param_array[0]][param_array[1]] = {} + if len(param_array)==3: + dir[id][param_array[0]][param_array[1]][param_array[2]] = value + if len(param_array)>=4: + if param_array[2] not in dir[id][param_array[0]][param_array[1]]: + dir[id][param_array[0]][param_array[1]][param_array[2]] = {} + dir[id][param_array[0]][param_array[1]][param_array[2]][param_array[3]] = value + else: + dir[id][mapping] = value + return dir + + if value.startswith('${KEEPER_') and id not in self.dynamic_tokens: + debug('Dynamic token detected',self.debug) + self.dynamic_tokens.append(id) + if name not in self.logged_records: + self.logged_records[name] = {'name':name, 'dynamic_token':True} + else: + self.logged_records[name]['dynamic_token'] = True + elif value and arg.startswith('totp-'): + if 'oneTimeCode' not in user: + user['oneTimeCode'] = { + "totp-algorithm": '', + "totp-digits": "", + "totp-period": "", + "totp-secret": "" + } + user['oneTimeCode'][arg] = value + elif value and arg == 'hostname': + resource['host'] = value + elif value and arg == 'port': + resource['pam_settings']['connection']['port'] = value + elif value and arg in self.mappings['users']: + self.users = handle_mapping(self.mappings['users'][arg],value,self.users) + elif arg in self.mappings['resources']: + self.connections = handle_mapping(self.mappings['resources'][arg],value,self.connections) + else: + display(f'Error: Unknown parameter detected: {arg}. 
Add it to KCM_mappings.json to resolve this error','bold red') + + # Handle args + if connection['parameter_name']: + handle_arg(id,connection['name'],connection['parameter_name'],connection['parameter_value']) + # Handle attributes + if connection['attribute_name']: + handle_arg(id,connection['name'],connection['attribute_name'],connection['attribute_value']) + + + self.user_records = list(user for user in self.users.values()) + self.resource_records = list(conn for conn in self.connections.values()) + + # Sanitize totp + for user in self.user_records: + if 'oneTimeCode' in user: + alg = user['oneTimeCode']["totp-algorithm"] + dig = user['oneTimeCode']["totp-digits"] + period = user['oneTimeCode']["totp-period"] + secret = user['oneTimeCode']["totp-secret"] + stripped_secret = ''.join([x for x in secret if x.isnumeric()]) + user['otp'] = f'otpauth://totp/{TOTP_ACCOUNT}?secret={stripped_secret}&issuer=&algorithm={alg}&digits={dig}&period={period}' + + # Handle SFTP records + for resource in self.resource_records: + if 'sftp' in resource['pam_settings']['connection']: + sftp_settings = resource['pam_settings']['connection']['sftp'] + # Create resource for SFTP + sftp_resource = { + 'folder_path':resource['folder_path']+'/SFTP Resources', + 'title': f'SFTP connection for resource {resource["host"]}', + 'type':'pamMachine', + "host": sftp_settings.get("host",""), + "port": sftp_settings.get("port",""), + "pam_settings": { + "options": { + "rotation": "off", + "connections": "off", + "tunneling": "off", + "graphical_session_recording": "off" + }, + "connection": { + "protocol": 'ssh', + "launch_credentials": f'KCM User - {name}' + } + } + } + self.resource_records.append(sftp_resource) + # Create User for SFTP + sftp_user = { + 'folder_path':f'KCM Users - {resource["folder_path"][16:]}/SFTP Users', + 'title': f'SFTP credentials for resource {resource["host"]}', + 'type':'pamUsers', + 'login': sftp_settings.get("login",""), + 'password': 
sftp_settings.get("password",""), + 'private_pem_key': sftp_settings.get("private_key","") + } + self.user_records.append(sftp_user) + # Set correct SFTP settings + resource['pam_settings']['connection']['sftp'].update({ + "sftp_resource": f'SFTP connection for resource {resource["host"]}', + "sftp_user_credentials": f'SFTP credentials for resource {resource["host"]}' + }) + + if self.dynamic_tokens: + display(f'{len(self.dynamic_tokens)} dynamic tokens detected, they will be added to the JSON file.') + if self.logged_records: + display(f'{len(self.logged_records)-len(self.dynamic_tokens)} records logged, they will be added to the JSON file.') + + logged_records = [] + if self.logged_records: + logged_records = (list(record for record in self.logged_records.values())) + + shared_folders = [] + for folder in self.shared_folders: + shared_folders.extend([f'KCM Users - {folder}',f'KCM Resources - {folder}']) + display('Make sure to add the following Shared Folders to your Gateway Application before importing:') + list_items(shared_folders) + + return { + "pam_data": { + "shared_folders": shared_folders, + "logged_records": logged_records, + "resources": self.resource_records, + "users": [user for user in self.user_records if len(user)>4] + } + } + + +KCM_export() From 9714f015052f9debb960769cec176641c3c38be6 Mon Sep 17 00:00:00 2001 From: lthievenaz-keeper Date: Mon, 9 Mar 2026 15:07:01 +0000 Subject: [PATCH 10/15] Added support for template files in PAM-KCM-Importer + general improvements (#1854) * Create kcm_export.py Add folder and script to convert KCM resources to PAM Project Extend template * Create KCM_mappings.json Add mapping dictionary of KCM parameters, to use in conjunction with the kcm_export.py script * Added comment about KCM_mappings * Fixed syntax for f strings with older python version Older versions of python don't support using the same quote characters on f strings - fixed * Updated naming scheme for resource * Add support for template file to PAM 
KCM import script + general improvements - A prompt now exists to collect a template JSON file. - Added deepcopies to ensure no reference problem - Improved nested dictionary function - Support for adding file path encapsulated in quotes --- examples/pam-kcm-import/kcm_export.py | 128 +++++++++++++++++--------- 1 file changed, 82 insertions(+), 46 deletions(-) diff --git a/examples/pam-kcm-import/kcm_export.py b/examples/pam-kcm-import/kcm_export.py index b91eab42f..d589ddd71 100644 --- a/examples/pam-kcm-import/kcm_export.py +++ b/examples/pam-kcm-import/kcm_export.py @@ -28,7 +28,8 @@ Connection group B1/ """ -from json import dump,dumps,loads +from json import dump,dumps,load,loads +from copy import deepcopy ## RICH Console styling - can be removed if rich was not imported ## from rich.console import Console @@ -125,13 +126,16 @@ def handle_prompt(valid_inputs,prompt='Input: '): def validate_file_upload(format,filename=None): if not filename: filename = input('File path: ') + if filename[0] in ['"',"'"]: + filename = filename[1:] + if filename[-1] in ['"',"'"]: + filename = filename[:-1] try: with open(filename,'r') as file: if format=='csv': from csv import DictReader return list(DictReader(file)) elif format=='json': - from json import load return load(file) elif format=='yaml': from yaml import safe_load @@ -142,6 +146,12 @@ def validate_file_upload(format,filename=None): return validate_file_upload(format) +def set_nested(d, keys, value): + for key in keys[:-1]: + d = d.setdefault(key, {}) + d[keys[-1]] = value + + def debug(text,DEBUG): if DEBUG: print(f'>>DEBUG: {text}') @@ -152,6 +162,8 @@ def __init__(self,DEBUG=DEBUG): self.mappings = validate_file_upload('json','KCM_mappings.json') self.debug = DEBUG self.db_config = DB_CONFIG + self.template_rs = None + self.template_usr = None self.folder_structure = 'ksm_based' self.separator = '/' self.dynamic_tokens = [] @@ -162,6 +174,7 @@ def __init__(self,DEBUG=DEBUG): display('What database are you running on 
KCM?', 'cyan') list_items(['(1) MySQL','(2) PostgreSQL']) self.database = handle_prompt({'1':'MYSQL','2':'POSTGRES'}) + print() # Collect db credentials self.collect_db_config() @@ -171,14 +184,15 @@ def __init__(self,DEBUG=DEBUG): if not connect: display('Unable to connect to database, ending program','bold red') return + print() # Generate template - json_template = self.generate_data() + json_output = self.generate_data() display('# Data collected and import-ready', 'green') display('Exporting JSON template...') - with open('pam_import.json','w') as user_file: - dump(json_template,user_file,indent=2) + with open('pam_import.json','w') as result_file: + dump(json_output,result_file,indent=2) display('Exported pam_import.json successfully','italic green') return @@ -300,6 +314,36 @@ def generate_data(self): Connection group B1/ ''', 'yellow') self.folder_structure = handle_prompt({'1':'ksm_based','2':'nested','3':'flat'}) + print() + + display('Do you wish to use a template file?','cyan') + display('''A JSON template file can be used to set default parameters on your resources / users. +The format of this file is as follows: +{ +. "pam_data": { +. "resources": [ +. { +. ...pamDirectory parameters, +. "users": [ ...pamDirectory users ] +. }, +. { +. ...pamMachine default parameters, +. "users": [ { ...pamUser default parameters } ] +. } +. ] +. 
} +} + ''','yellow') + list_items(['(1) Yes','(2) No']) + if handle_prompt({'1':True,'2':False}): + display('## Please upload your JSON template', 'cyan') + templ = validate_file_upload('json') + templ_resources = templ.get('pam_data',{}).get('resources',[]) + if len(templ_resources)>1: + self.template_rs = templ_resources[1] + if self.template_rs.get('users',[]): + self.template_usr = self.template_rs['users'][0] + print() self.group_paths = {} @@ -328,7 +372,6 @@ def resolve_path(group_id): self.connections = {} self.users = {} self.shared_folders = [] - print(self.group_paths) for connection in self.connection_data: id = connection['connection_id'] @@ -349,12 +392,13 @@ def resolve_path(group_id): if len(folder_array)>1: folder_path += self.separator+self.separator.join(folder_array[1:]) # Create user - user = { + user = deepcopy(self.template_usr) if self.template_usr else deepcopy({ 'folder_path': folder_path, 'title': f'KCM User - {name}', 'type': "pamUser", 'rotation_settings':{} - } + }) + if self.template_usr: user.update({'folder_path': folder_path,'title': f'KCM User - {name}'}) self.users[id] = user # Add resources @@ -372,7 +416,7 @@ def resolve_path(group_id): 'sql-server': 'pamDatabase', } - resource = { + resource = deepcopy(self.template_rs) if self.template_rs else deepcopy({ 'folder_path':folder_path, 'title': f'KCM Resource - {name}', 'type':types.get(connection['protocol'],'pamMachine'), @@ -389,45 +433,34 @@ def resolve_path(group_id): "launch_credentials": f'KCM User - {name}' } } - } + }) + if self.template_rs: + resource.update({ + 'folder_path':folder_path, + 'title': f'KCM Resource - {name}', + 'type':types.get(connection['protocol'],'pamMachine'), + 'users':[] + }) + resource['pam_settings']['connection']['protocol'] = connection['protocol'] if connection['protocol'] != "postgres" else "postgresql" + resource['pam_settings']['connection']['launch_credentials'] = f'KCM User - {name}' self.connections[id] = resource - def 
handle_arg(id,name,arg,value): - def handle_mapping(mapping,value,dir): + def handle_arg(id,name,arg,value,resource,user): + def handle_mapping(mapping, value, dir): if mapping == 'ignore': - debug(f'Mapping {arg} ignored',self.debug) + debug(f'Mapping {arg} ignored', self.debug) return dir - if mapping=='log': - if name not in self.logged_records: - debug(f'Adding record {name} to logged records',self.debug) - self.logged_records[name] = {'name':name, arg:value} - else: - self.logged_records[name][arg] = value + if mapping == 'log': + record = self.logged_records.setdefault(name, {'name': name}) + record[arg] = value return dir if mapping is None: - debug(f'Mapping {arg} recognized but not supported',self.debug) + debug(f'Mapping {arg} recognized but not supported', self.debug) return dir if '=' in mapping: - value = mapping.split('=')[1] - mapping = mapping.split('=')[0] - if '.' in mapping: - param_array = mapping.split('.') - if len(param_array)>=2: - if param_array[0] not in dir[id]: - dir[id][param_array[0]] = {} - if len(param_array)==2: - dir[id][param_array[0]][param_array[1]] = value - if len(param_array)>=3: - if param_array[1] not in dir[id][param_array[0]]: - dir[id][param_array[0]][param_array[1]] = {} - if len(param_array)==3: - dir[id][param_array[0]][param_array[1]][param_array[2]] = value - if len(param_array)>=4: - if param_array[2] not in dir[id][param_array[0]][param_array[1]]: - dir[id][param_array[0]][param_array[1]][param_array[2]] = {} - dir[id][param_array[0]][param_array[1]][param_array[2]][param_array[3]] = value - else: - dir[id][mapping] = value + mapping, value = mapping.split('=', 1) + keys = mapping.split('.') + set_nested(dir[id], keys, value) return dir if value.startswith('${KEEPER_') and id not in self.dynamic_tokens: @@ -459,10 +492,10 @@ def handle_mapping(mapping,value,dir): # Handle args if connection['parameter_name']: - handle_arg(id,connection['name'],connection['parameter_name'],connection['parameter_value']) + 
handle_arg(id,connection['name'],connection['parameter_name'],connection['parameter_value'],self.connections[id],self.users[id]) # Handle attributes if connection['attribute_name']: - handle_arg(id,connection['name'],connection['attribute_name'],connection['attribute_value']) + handle_arg(id,connection['name'],connection['attribute_name'],connection['attribute_value'],self.connections[id],self.users[id]) self.user_records = list(user for user in self.users.values()) @@ -518,11 +551,13 @@ def handle_mapping(mapping,value,dir): "sftp_resource": f'SFTP connection for resource {resource["host"]}', "sftp_user_credentials": f'SFTP credentials for resource {resource["host"]}' }) - + + display('# Export Results') + if self.dynamic_tokens: - display(f'{len(self.dynamic_tokens)} dynamic tokens detected, they will be added to the JSON file.') - if self.logged_records: - display(f'{len(self.logged_records)-len(self.dynamic_tokens)} records logged, they will be added to the JSON file.') + display(f'- {len(self.dynamic_tokens)} dynamic tokens detected, they will be added to the JSON file.','yellow') + if len(self.logged_records)-len(self.dynamic_tokens)>0: + display(f'- {len(self.logged_records)-len(self.dynamic_tokens)} records logged, they will be added to the JSON file.','yellow') logged_records = [] if self.logged_records: @@ -534,6 +569,7 @@ def handle_mapping(mapping,value,dir): display('Make sure to add the following Shared Folders to your Gateway Application before importing:') list_items(shared_folders) + return { "pam_data": { "shared_folders": shared_folders, From 06e4acfea2a6282dceb9976aff9abf4aaed811e0 Mon Sep 17 00:00:00 2001 From: amangalampalli-ks Date: Tue, 10 Mar 2026 18:22:39 +0530 Subject: [PATCH 11/15] Implement custom-field fallback for no file storage for config and service_config files (#1852) (#1857) --- docker_ksm_utility.py | 19 ++++- keepercommander/service/config/cli_handler.py | 40 +++++++++- .../service/config/record_handler.py | 67 
+++++++++++++++++ keepercommander/service/docker/setup_base.py | 75 ++++++++++++------- 4 files changed, 170 insertions(+), 31 deletions(-) diff --git a/docker_ksm_utility.py b/docker_ksm_utility.py index 858f2a4f2..4b5088593 100755 --- a/docker_ksm_utility.py +++ b/docker_ksm_utility.py @@ -306,6 +306,10 @@ def _get_secret_by_uid_or_title(secrets_manager, record_identifier): def download_config(ksm_config_path, ksm_token, record_identifier, output_path): """ Download config.json from KSM record. + + Tries file attachments first. If no config.json attachment exists + (e.g. the account has no file storage plan), falls back to the + ``config_json`` custom field written by the setup commands. Args: ksm_config_path (str): Path to KSM config file (optional) @@ -344,15 +348,24 @@ def download_config(ksm_config_path, ksm_token, record_identifier, output_path): if not secret: return False - # Find config.json attachment + # Try file attachment first for file in secret.files: if file.name.lower() == 'config.json': - # Ensure output directory exists os.makedirs(os.path.dirname(output_path), exist_ok=True) file.save_file(output_path, True) return True - print("ERROR: config.json attachment not found in record") + # Fallback: check for config_json custom field + config_value = secret.custom_field('config_json', single=True) + if config_value: + os.makedirs(os.path.dirname(output_path), exist_ok=True) + with open(output_path, 'w', encoding='utf-8') as f: + f.write(config_value) + os.chmod(output_path, 0o600) + print("config.json restored from custom field") + return True + + print("ERROR: config.json not found as attachment or custom field in record") return False except Exception as e: diff --git a/keepercommander/service/config/cli_handler.py b/keepercommander/service/config/cli_handler.py index 9329c22a0..3b5a048bc 100644 --- a/keepercommander/service/config/cli_handler.py +++ b/keepercommander/service/config/cli_handler.py @@ -85,10 +85,44 @@ def get_help_output(self, 
params: KeeperParams) -> str: def download_config_from_vault(self, params: KeeperParams, title: str, config_dir: Path) -> bool: """Download config file from vault if it exists.""" try: - if record_uid := self.find_config_record(params, title): - self.execute_cli_command(params, f"download-attachment {record_uid} --out-dir '{config_dir}'") + record_uid = self.find_config_record(params, title) + if not record_uid: + return False + + self.execute_cli_command(params, f"download-attachment {record_uid} --out-dir '{config_dir}'") + + json_path = config_dir / 'service_config.json' + yaml_path = config_dir / 'service_config.yaml' + if json_path.exists() or yaml_path.exists(): return True - return False + + return self._restore_config_from_custom_field(params, record_uid, config_dir) except Exception as e: logger.error(f"Error downloading config from vault: {e}") + return False + + def _restore_config_from_custom_field(self, params: KeeperParams, record_uid: str, config_dir: Path) -> bool: + """Write service_config content from a custom field to disk.""" + try: + from ... import vault + record = vault.KeeperRecord.load(params, record_uid) + if not isinstance(record, vault.TypedRecord) or not record.custom: + return False + + field_map = { + 'service_config_json': config_dir / 'service_config.json', + 'service_config_yaml': config_dir / 'service_config.yaml', + } + for field in record.custom: + if field.label in field_map and field.get_default_value(): + dest = field_map[field.label] + dest.write_text(field.get_default_value()) + from ... 
import utils + utils.set_file_permissions(str(dest)) + logger.debug(f"Restored {dest.name} from custom field") + return True + + return False + except Exception as e: + logger.error(f"Error restoring config from custom field: {e}") return False \ No newline at end of file diff --git a/keepercommander/service/config/record_handler.py b/keepercommander/service/config/record_handler.py index f93f4efb5..9a24b7013 100644 --- a/keepercommander/service/config/record_handler.py +++ b/keepercommander/service/config/record_handler.py @@ -25,6 +25,21 @@ def __init__(self): self.validator = ConfigValidator() self.cli_handler = CommandHandler() + @staticmethod + def has_file_storage(params) -> bool: + """Check whether the current user can upload file attachments.""" + if not params.license or 'bytes_total' not in params.license: + return False + if int(params.license['bytes_total']) <= 0: + return False + if params.enforcements and 'booleans' in params.enforcements: + restricted = next( + (x['value'] for x in params.enforcements['booleans'] + if x['key'] == 'restrict_file_upload'), False) + if restricted: + return False + return True + @debug_decorator def create_record(self, is_advanced_security_enabled: str, commands: str, token_expiration: str = None, record_uid: str = None) -> Dict[str, Any]: """Create a new configuration record.""" @@ -50,6 +65,13 @@ def create_record(self, is_advanced_security_enabled: str, commands: str, token_ def update_or_add_record(self, params: KeeperParams, title: str, config_path: Path) -> None: """Update existing record or add new one.""" + if self.has_file_storage(params): + self._update_or_add_record_attachment(params, title, config_path) + else: + self._update_or_add_record_custom_field(params, title, config_path) + + def _update_or_add_record_attachment(self, params: KeeperParams, title: str, config_path: Path) -> None: + """Upload service_config as a file attachment (original behaviour).""" try: record_uid = 
self.cli_handler.find_config_record(params, title) @@ -74,6 +96,51 @@ def update_or_add_record(self, params: KeeperParams, title: str, config_path: Pa except Exception as e: print(f"Error updating/adding record: {e}") + def _update_or_add_record_custom_field(self, params: KeeperParams, title: str, config_path: Path) -> None: + """Store service_config content as a custom field (no file storage plan).""" + try: + from ... import api, vault, record_management + + config_content = config_path.read_text() + field_label = f'service_config_{config_path.suffix.lstrip(".")}' + + record_uid = self.cli_handler.find_config_record(params, title) + + if record_uid: + record = vault.KeeperRecord.load(params, record_uid) + else: + record = vault.KeeperRecord.create(params, 'login') + record.record_uid = utils.generate_uid() + record.record_key = utils.generate_aes_key() + record.title = title + record.type_name = 'login' + record_management.add_record_to_folder(params, record) + api.sync_down(params) + + if not isinstance(record, vault.TypedRecord): + print("Error: Invalid record type for custom field storage") + return + + if record.custom is None: + record.custom = [] + record.custom = [ + f for f in record.custom + if f.label not in ('service_config_json', 'service_config_yaml') + ] + record.custom.append(vault.TypedField.new_field('secret', config_content, field_label)) + + record_management.update_record(params, record) + params.sync_data = True + api.sync_down(params) + + if not record_uid: + self.record_uid = record.record_uid + + logger.debug(f"Service config stored as custom field '{field_label}' (no file storage plan)") + + except Exception as e: + print(f"Error storing service config as custom field: {e}") + def update_or_add_cert_record(self, params: KeeperParams, title: str) -> None: """Update existing certificate record or add a new one in Keeper Vault.""" try: diff --git a/keepercommander/service/docker/setup_base.py b/keepercommander/service/docker/setup_base.py 
index 9b0457b72..e643af86b 100644 --- a/keepercommander/service/docker/setup_base.py +++ b/keepercommander/service/docker/setup_base.py @@ -33,6 +33,7 @@ from .models import SetupResult, SetupStep, DockerSetupConstants from .printer import DockerSetupPrinter from ..config.config_validation import ConfigValidator, ValidationError +from ..config.record_handler import RecordHandler class DockerSetupBase: @@ -62,8 +63,8 @@ def run_setup_steps(self, params, folder_name: str, app_name: str, record_name: DockerSetupPrinter.print_step(SetupStep.CREATE_RECORD.value, total_steps, f"Creating record '{record_name}'...") record_uid = self._create_config_record(params, record_name, folder_uid) - # Step 4: Upload config file - DockerSetupPrinter.print_step(SetupStep.UPLOAD_CONFIG.value, total_steps, "Uploading config.json attachment...") + # Step 4: Store config file (attachment or custom field) + DockerSetupPrinter.print_step(SetupStep.UPLOAD_CONFIG.value, total_steps, "Storing config.json...") self._upload_config_file(params, record_uid, config_path) # Step 5: Create KSM app @@ -180,41 +181,65 @@ def _create_config_record(self, params, record_name: str, folder_uid: str) -> st raise CommandError('docker-setup', f'Failed to create record: {str(e)}') def _upload_config_file(self, params, record_uid: str, config_path: str) -> None: - """Upload config.json as attachment to the record""" + """Upload config.json as attachment, or store as custom field if no file storage plan.""" temp_config_path = None try: - # Clean the config first cleaned_config_path = self._clean_config_json(config_path) if cleaned_config_path != config_path: temp_config_path = cleaned_config_path - - record = vault.KeeperRecord.load(params, record_uid) - if not isinstance(record, vault.TypedRecord): - raise CommandError('docker-setup', 'Invalid record type for attachments') - # Delete existing config.json attachments to prevent duplicates - self._delete_existing_config_attachments(record, params) - - # Upload 
attachment - upload_task = attachment.FileUploadTask(cleaned_config_path) - upload_task.title = 'config.json' - - attachment.upload_attachments(params, record, [upload_task]) - record_management.update_record(params, record) - params.sync_data = True - api.sync_down(params) - - DockerSetupPrinter.print_success("Config file uploaded successfully") + + if RecordHandler.has_file_storage(params): + self._upload_config_as_attachment(params, record_uid, cleaned_config_path) + else: + self._store_config_as_custom_field(params, record_uid, cleaned_config_path) + except CommandError: + raise except Exception as e: - raise CommandError('docker-setup', f'Failed to upload config file: {str(e)}') + raise CommandError('docker-setup', f'Failed to store config file: {str(e)}') finally: if temp_config_path and os.path.exists(temp_config_path): try: os.unlink(temp_config_path) - except OSError as e: - # Log or handle specifically - print(f"Warning: Could not delete temporary config file: {e}") + except OSError: pass + def _upload_config_as_attachment(self, params, record_uid: str, config_path: str) -> None: + """Upload config.json as a file attachment (requires file storage plan).""" + record = vault.KeeperRecord.load(params, record_uid) + if not isinstance(record, vault.TypedRecord): + raise CommandError('docker-setup', 'Invalid record type for attachments') + self._delete_existing_config_attachments(record, params) + + upload_task = attachment.FileUploadTask(config_path) + upload_task.title = 'config.json' + + attachment.upload_attachments(params, record, [upload_task]) + record_management.update_record(params, record) + params.sync_data = True + api.sync_down(params) + + DockerSetupPrinter.print_success("Config file uploaded as attachment") + + def _store_config_as_custom_field(self, params, record_uid: str, config_path: str) -> None: + """Store config.json content as a custom field (fallback when no file storage plan).""" + with open(config_path, 'r') as f: + config_content = 
f.read() + + record = vault.KeeperRecord.load(params, record_uid) + if not isinstance(record, vault.TypedRecord): + raise CommandError('docker-setup', 'Invalid record type') + + if record.custom is None: + record.custom = [] + record.custom = [f for f in record.custom if f.label != 'config_json'] + record.custom.append(vault.TypedField.new_field('secret', config_content, 'config_json')) + + record_management.update_record(params, record) + params.sync_data = True + api.sync_down(params) + + DockerSetupPrinter.print_success("Config stored as custom field (no file storage plan)") + def _delete_existing_config_attachments(self, record, params) -> None: """Delete any existing config.json attachments to prevent duplicates""" # Modern records use TypedRecord with fileRef system From 59c4da4838218d8ccd4a37d3acd609efb25a9b50 Mon Sep 17 00:00:00 2001 From: ukumar-ks Date: Tue, 10 Mar 2026 16:37:58 +0530 Subject: [PATCH 12/15] KC-1116: Bugfix changes (#1834) * KC-1116: Bugfix changes * Kepm Pr review changes --- keepercommander/commands/pedm/pedm_admin.py | 99 ++++++++++++++++++--- keepercommander/pedm/admin_plugin.py | 7 +- 2 files changed, 92 insertions(+), 14 deletions(-) diff --git a/keepercommander/commands/pedm/pedm_admin.py b/keepercommander/commands/pedm/pedm_admin.py index f029bf93f..2725dbd45 100644 --- a/keepercommander/commands/pedm/pedm_admin.py +++ b/keepercommander/commands/pedm/pedm_admin.py @@ -62,11 +62,28 @@ def resolve_existing_policies(pedm: admin_plugin.PedmPlugin, policy_names: Any) found_policies: Dict[str, admin_types.PedmPolicy] = {} p: Optional[admin_types.PedmPolicy] if isinstance(policy_names, list): + resolve_by_name = [] for policy_name in policy_names: p = pedm.policies.get_entity(policy_name) - if p is None: - raise base.CommandError(f'Policy name "{policy_name}" is not found') - found_policies[p.policy_uid] = p + if p is not None: + found_policies[p.policy_uid] = p + else: + resolve_by_name.append(policy_name) + + if resolve_by_name: + 
all_policies = list(pedm.policies.get_all_entities()) + for policy_name in resolve_by_name: + l_name = policy_name.lower() + matches = [x for x in all_policies + if isinstance(x.data, dict) and + isinstance(x.data.get('PolicyName'), str) and + x.data['PolicyName'].lower() == l_name] + if len(matches) == 0: + raise base.CommandError(f'Policy "{policy_name}" is not found') + if len(matches) > 1: + raise base.CommandError(f'Policy "{policy_name}" is not unique. Please use Policy UID') + found_policies[matches[0].policy_uid] = matches[0] + if len(found_policies) == 0: raise base.CommandError('No policies were found') return list(found_policies.values()) @@ -79,7 +96,17 @@ def resolve_single_policy(pedm: admin_plugin.PedmPlugin, policy_uid: Any) -> adm if isinstance(policy, admin_types.PedmPolicy): return policy - raise base.CommandError(f'Policy UID \"{policy_uid}\" does not exist') + + l_name = policy_uid.lower() + matches = [x for x in pedm.policies.get_all_entities() + if isinstance(x.data, dict) and + isinstance(x.data.get('PolicyName'), str) and + x.data['PolicyName'].lower() == l_name] + if len(matches) == 1: + return matches[0] + if len(matches) > 1: + raise base.CommandError(f'Policy \"{policy_uid}\" is not unique. 
Please use Policy UID') + raise base.CommandError(f'Policy \"{policy_uid}\" does not exist') @staticmethod def resolve_single_approval(pedm: admin_plugin.PedmPlugin, approval_uid: Any) -> admin_types.PedmApproval: @@ -1268,6 +1295,10 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: actions = data.get('Actions') or {} on_success = actions.get('OnSuccess') or {} controls = on_success.get('Controls') or '' + if isinstance(controls, list): + controls = ', '.join(str(c) for c in controls) + elif isinstance(controls, str): + controls = controls.replace('\n', ', ') collections = [x.collection_uid for x in plugin.storage.collection_links.get_links_for_object(policy.policy_uid)] collections = ['*' if x == all_agents else x for x in collections] @@ -1476,13 +1507,45 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: policy = PedmUtils.resolve_single_policy(plugin, kwargs.get('policy')) - body = json.dumps(policy.data, indent=4) + fmt = kwargs.get('format', 'table') filename = kwargs.get('output') - if kwargs.get('format') == 'json' and filename: - with open(filename, 'w') as f: - f.write(body) - else: - return body + + if fmt == 'json': + body = json.dumps(policy.data, indent=4) + if filename: + with open(filename, 'w') as f: + f.write(body) + return + else: + return body + + data = policy.data or {} + all_agents = utils.base64_url_encode(plugin.all_agents) + headers = ['policy_uid', 'policy_name', 'policy_type', 'status', 'controls', + 'users', 'machines', 'applications', 'collections'] + + actions = data.get('Actions') or {} + on_success = actions.get('OnSuccess') or {} + controls = on_success.get('Controls') or '' + if isinstance(controls, list): + controls = ', '.join(str(c) for c in controls) + elif isinstance(controls, str): + controls = controls.replace('\n', ', ') + + collections = [x.collection_uid for x in plugin.storage.collection_links.get_links_for_object(policy.policy_uid)] + collections = ['*' if x == all_agents else x for x in 
collections] + collections.sort() + + status = data.get('Status') + if policy.disabled: + status = 'off' + + table = [[policy.policy_uid, data.get('PolicyName'), data.get('PolicyType'), status, + controls, data.get('UserCheck'), data.get('MachineCheck'), + data.get('ApplicationCheck'), collections]] + + headers = [report_utils.field_to_title(x) for x in headers] + return report_utils.dump_report_data(table, headers, fmt=fmt, filename=filename) class PedmPolicyDeleteCommand(base.ArgparseCommand): @@ -1503,6 +1566,9 @@ def execute(self, context: KeeperParams, **kwargs) -> None: if isinstance(status, admin_types.EntityStatus) and not status.success: raise base.CommandError(f'Failed to delete policy "{status.entity_uid}": {status.message}') + policy_names = ', '.join(p.data.get('PolicyName') or p.policy_uid for p in policies) + logging.info('Successfully deleted policy: %s', policy_names) + class PedmPolicyAgentsCommand(base.ArgparseCommand): def __init__(self): @@ -1525,7 +1591,8 @@ def execute(self, context: KeeperParams, **kwargs) -> None: rq.policyUid.extend(policy_uids) rq.summaryOnly = False rs = api.execute_router(context, "pedm/get_policy_agents", rq, rs_type=pedm_pb2.PolicyAgentResponse) - assert rs is not None + if rs is None: + rs = pedm_pb2.PolicyAgentResponse() table = [] headers = ['key', 'uid', 'name', 'status'] @@ -1580,6 +1647,9 @@ def execute(self, context: KeeperParams, **kwargs) -> None: if len(policy_uids) == 0: raise base.CommandError('Nothing to do') + if len(collection_uids) == 0: + raise base.CommandError('No collections specified. Use -c/--collection to specify collections to assign. 
Use "*" or "all" to assign all agents.') + statuses = plugin.assign_policy_collections(policy_uids, collection_uids) for status in statuses.add: if not status.success: @@ -1588,6 +1658,13 @@ def execute(self, context: KeeperParams, **kwargs) -> None: if not status.success: raise base.CommandError(f'Failed to remove from policy: {status.message}') + policy_names = ', '.join(p.data.get('PolicyName') or p.policy_uid for p in policies) + collection_labels = ', '.join( + '*' if c == plugin.all_agents else utils.base64_url_encode(c) + for c in collection_uids + ) + logging.info('Successfully assigned collection(s) [%s] to policy: %s', collection_labels, policy_names) + class PedmCollectionCommand(base.GroupCommandNew): def __init__(self): diff --git a/keepercommander/pedm/admin_plugin.py b/keepercommander/pedm/admin_plugin.py index c9a617622..d3296e442 100644 --- a/keepercommander/pedm/admin_plugin.py +++ b/keepercommander/pedm/admin_plugin.py @@ -320,7 +320,7 @@ def get_collections() -> Iterable[admin_storage.PedmStorageCollection]: collection_uid=collection_dto.collection_uid, collection_type=collection_dto.collection_type, collection_data=collection_data, created=collection_dto.created) except Exception as e: - self.logger.info('Collection "%s" load error: %s', collection_dto.collection_uid, e) + self.logger.error('Collection "%s" load error: %s', collection_dto.collection_uid, e) collection = admin_types.PedmCollection( collection_uid=collection_dto.collection_uid, collection_type=collection_dto.collection_type, collection_data={}, created=collection_dto.created) @@ -532,9 +532,10 @@ def assign_policy_collections( rq_link.collectionUid.extend(collections) rq.setCollection.append(rq_link) - status_rs = api.execute_router(self.params, rq, 'pedm/set_policy_collections', rs_type=pedm_pb2.PedmStatusResponse) + status_rs = api.execute_router(self.params, 'pedm/set_policy_collections', rq, rs_type=pedm_pb2.PedmStatusResponse) self._need_sync = True - assert status_rs is not 
None + if status_rs is None: + return admin_types.ModifyStatus(add=[], update=[], remove=[]) return admin_types.ModifyStatus.from_proto(status_rs) def modify_policies(self, *, From 1c97da48cce3b8fe9b750c1de3b7793a7fee720c Mon Sep 17 00:00:00 2001 From: Joao Paulo Oliveira Santos Date: Wed, 11 Mar 2026 12:13:38 -0400 Subject: [PATCH 13/15] KC-1158: Add 'CSPM' as a supported integrations type for 'public-api-key' (#1862) # Conflicts: # keepercommander/commands/enterprise_api_keys.py # unit-tests/test_command_enterprise_api_keys.py --- keepercommander/commands/enterprise_api_keys.py | 14 +++++++++++--- unit-tests/test_command_enterprise_api_keys.py | 17 ++++++++++++++++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/keepercommander/commands/enterprise_api_keys.py b/keepercommander/commands/enterprise_api_keys.py index 10c93f43a..705bfecfe 100644 --- a/keepercommander/commands/enterprise_api_keys.py +++ b/keepercommander/commands/enterprise_api_keys.py @@ -54,6 +54,9 @@ # Generate permanent API key with read-only access public-api-key generate --name "Monitoring Tool" --integrations "SIEM:1" --expires never + # Generate API key with CSPM role and 30-day expiration + public-api-key generate --name "CSPM Integration" --integrations "CSPM:1" --expires 30d + # Generate API key with BILLING role and save details to JSON file public-api-key generate --name "Billing Integration" --integrations "BILLING:2" --expires 1y --format json --output billing_key.json ''', @@ -64,11 +67,12 @@ api_key_generate_parser.add_argument('--integrations', dest='integrations', required=True, action='store', help='''Integration with action type. 
Format: "RoleName:ActionType" -Available integrations: SIEM, BILLING +Available integrations: SIEM, CSPM, BILLING Action types: 1=READ (read-only), 2=READ_WRITE (full access) Examples: --integrations "SIEM:2" # SIEM role with read-write access --integrations "SIEM:1" # SIEM role with read-only access + --integrations "CSPM:1" # CSPM role with read-only access --integrations "BILLING:2" # BILLING role with read-write access --integrations "BILLING:1" # BILLING role with read-only access''') api_key_generate_parser.add_argument('--expires', dest='expires', action='store', @@ -153,6 +157,9 @@ def print_help(self, **kwargs): print(' # Generate a new API key for SIEM integration (30-day expiration)') print(' public-api-key generate --name "SIEM Tool" --integrations "SIEM:2" --expires 30d') print() + print(' # Generate a new API key for CSPM integration (30-day expiration)') + print(' public-api-key generate --name "CSPM Integration" --integrations "CSPM:1" --expires 30d') + print() print(' # Generate a new API key for BILLING integration (30-day expiration)') print(' public-api-key generate --name "Billing Tool" --integrations "BILLING:2" --expires 30d') print() @@ -161,6 +168,7 @@ def print_help(self, **kwargs): print() print('Available Integrations:') print(' SIEM - Security Information and Event Management') + print(' CSPM - Cloud Security Posture Management') print(' BILLING - Billing and subscription management') print() print('Role Action Types:') @@ -275,7 +283,7 @@ def execute(self, params, **kwargs): # Parse integrations - now required integrations_str = kwargs.get('integrations') if not integrations_str: - print("At least one integration is required. Example: --integrations 'SIEM:2' or --integrations 'BILLING:2'") + print("At least one integration is required. 
Example: --integrations 'SIEM:2' or --integrations 'CSPM:1' or --integrations 'BILLING:2'") return for integration_spec in integrations_str.split(','): @@ -285,7 +293,7 @@ def execute(self, params, **kwargs): if ':' in integration_spec: integration_id_str, action_type_str = integration_spec.split(':', 1) - allowed_integrations = [("SIEM", 1), ("BILLING", 3)] + allowed_integrations = [("SIEM", 1), ("CSPM", 2), ("BILLING", 3)] allowed_integration_names = [integration[0].upper() for integration in allowed_integrations] if integration_id_str.strip().upper() not in allowed_integration_names: print(f"Integration '{integration_id_str.strip()}' does not match allowed integrations: {', '.join(allowed_integration_names)}. Skipping.") diff --git a/unit-tests/test_command_enterprise_api_keys.py b/unit-tests/test_command_enterprise_api_keys.py index 5be8f4437..e90108927 100644 --- a/unit-tests/test_command_enterprise_api_keys.py +++ b/unit-tests/test_command_enterprise_api_keys.py @@ -294,7 +294,7 @@ def test_api_key_generate_missing_roles(self): with mock.patch('builtins.print') as mock_print: cmd.execute(params, name='Test Key') - mock_print.assert_called_with("At least one integration is required. Example: --integrations 'SIEM:2' or --integrations 'BILLING:2'") + mock_print.assert_called_with("At least one integration is required. 
Example: --integrations 'SIEM:2' or --integrations 'CSPM:1' or --integrations 'BILLING:2'") def test_api_key_generate_invalid_role_format(self): """Test API key generation fails with invalid integration format""" @@ -349,6 +349,20 @@ def test_api_key_generate_billing_msp(self): self.assertEqual(len(TestEnterpriseApiKeys.expected_commands), 0) + def test_api_key_generate_success_cspm(self): + """Test successful API key generation with CSPM integration""" + params = get_connected_params() + + cmd = enterprise_api_keys.ApiKeyGenerateCommand() + TestEnterpriseApiKeys.expected_commands = ['generate_token'] + + # Mock get_enterprise_id to avoid API call + with mock.patch.object(cmd, 'get_enterprise_id', return_value=8560 << 32): + with mock.patch('builtins.print'): + cmd.execute(params, name='CSPM Integration', integrations='CSPM:1', expires='30d') + + self.assertEqual(len(TestEnterpriseApiKeys.expected_commands), 0) + def test_api_key_revoke_success(self): """Test successful API key revocation""" params = get_connected_params() @@ -709,6 +723,7 @@ def _get_role_name_by_id(role_id): """Helper method to map role IDs to names""" role_map = { 1: "SIEM", + 2: "CSPM", 3: "BILLING" } return role_map.get(role_id, f"Role_{role_id}") \ No newline at end of file From 0b90a0b2de5cd332a5fe14fc1b9d0da365fefa64 Mon Sep 17 00:00:00 2001 From: amangalampalli-ks Date: Wed, 11 Mar 2026 23:13:31 +0530 Subject: [PATCH 14/15] Add bot port prompt and field (#1853) --- .../integrations/integration_setup_base.py | 11 ++++-- .../commands/integrations/teams_app_setup.py | 17 +++++++++ .../service/docker/compose_builder.py | 36 ++++++++++--------- keepercommander/service/docker/models.py | 1 + 4 files changed, 46 insertions(+), 19 deletions(-) diff --git a/keepercommander/service/commands/integrations/integration_setup_base.py b/keepercommander/service/commands/integrations/integration_setup_base.py index a9b825eab..290b55bd8 100644 --- 
a/keepercommander/service/commands/integrations/integration_setup_base.py +++ b/keepercommander/service/commands/integrations/integration_setup_base.py @@ -86,6 +86,10 @@ def get_docker_image(self) -> str: def get_record_env_key(self) -> str: return f'{self.get_integration_name().upper()}_RECORD' + def get_integration_service_ports(self, config) -> List[str]: + """Return host:container port mappings for the integration container. Override if needed.""" + return [] + def get_commander_service_name(self) -> str: return f'commander-{self.get_integration_name().lower()}' @@ -250,7 +254,7 @@ def _run_integration_setup(self, params, setup_result: SetupResult, record_uid = self._create_integration_record(params, record_name, setup_result.folder_uid, custom_fields) DockerSetupPrinter.print_step(2, 2, f"Updating docker-compose.yml with {name} App service...") - self._update_docker_compose(setup_result, service_config, record_uid) + self._update_docker_compose(setup_result, service_config, record_uid, config) return record_uid, config @@ -306,7 +310,7 @@ def _update_record_custom_fields(self, params, record_uid: str, custom_fields: L def _update_docker_compose(self, setup_result: SetupResult, service_config: ServiceConfig, - record_uid: str) -> None: + record_uid: str, config=None) -> None: compose_file = os.path.join(os.getcwd(), 'docker-compose.yml') service_name = self.get_docker_service_name() @@ -329,7 +333,8 @@ def _update_docker_compose(self, setup_result: SetupResult, container_name=self.get_docker_container_name(), image=self.get_docker_image(), record_uid=record_uid, - record_env_key=self.get_record_env_key() + record_env_key=self.get_record_env_key(), + ports=self.get_integration_service_ports(config) ).build() with open(compose_file, 'w') as f: diff --git a/keepercommander/service/commands/integrations/teams_app_setup.py b/keepercommander/service/commands/integrations/teams_app_setup.py index 76a46d6f3..3d3b80aaf 100644 --- 
a/keepercommander/service/commands/integrations/teams_app_setup.py +++ b/keepercommander/service/commands/integrations/teams_app_setup.py @@ -11,9 +11,12 @@ """Teams App integration setup command.""" +from typing import List + from .... import vault from ....display import bcolors from ...docker import TeamsConfig +from ...config.config_validation import ConfigValidator, ValidationError from .integration_setup_base import IntegrationSetupCommand @@ -66,6 +69,16 @@ def collect_integration_config(self): "Invalid Team ID (must be 32 hex characters in pattern xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)" ) + print(f"\n{bcolors.BOLD}TEAMS_BOT_PORT:{bcolors.ENDC}") + print(f" Port on which the Teams bot will listen for incoming requests") + while True: + bot_port_input = input(f"{bcolors.OKBLUE}Bot Port [Press Enter for 3978]:{bcolors.ENDC} ").strip() or '3978' + try: + bot_port = ConfigValidator.validate_port(bot_port_input) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + pedm_enabled, pedm_interval = self._collect_pedm_config() da_enabled, da_interval = self._collect_device_approval_config() @@ -77,6 +90,7 @@ def collect_integration_config(self): tenant_id=tenant_id, approvals_channel_id=approvals_channel_id, approvals_team_id=approvals_team_id, + bot_port=bot_port, pedm_enabled=pedm_enabled, pedm_polling_interval=pedm_interval, device_approval_enabled=da_enabled, @@ -96,6 +110,9 @@ def build_record_custom_fields(self, config): vault.TypedField.new_field('text', str(config.device_approval_polling_interval), 'device_approval_polling_interval'), ] + def get_integration_service_ports(self, config) -> List[str]: + return [f'{config.bot_port}:{config.bot_port}'] + # ── Display ─────────────────────────────────────────────────── def print_integration_specific_resources(self, config): diff --git a/keepercommander/service/docker/compose_builder.py b/keepercommander/service/docker/compose_builder.py index f4836f143..be3bf5ffa 100644 --- 
a/keepercommander/service/docker/compose_builder.py +++ b/keepercommander/service/docker/compose_builder.py @@ -37,12 +37,13 @@ def build_dict(self) -> Dict[str, Any]: def add_integration_service(self, service_name: str, container_name: str, image: str, record_uid: str, - record_env_key: str) -> 'DockerComposeBuilder': + record_env_key: str, + ports: List[str] = None) -> 'DockerComposeBuilder': """Add an integration service to the compose file. Returns self.""" if self.commander_service_name not in self._services: self._services[self.commander_service_name] = self._build_commander_service() self._services[service_name] = self._build_integration_service( - container_name, image, record_uid, record_env_key + container_name, image, record_uid, record_env_key, ports or [] ) return self @@ -65,23 +66,26 @@ def _build_commander_service(self) -> Dict[str, Any]: return service def _build_integration_service(self, container_name: str, image: str, - record_uid: str, - record_env_key: str) -> Dict[str, Any]: - return { + record_uid: str, record_env_key: str, + ports: List[str] = None) -> Dict[str, Any]: + service: Dict[str, Any] = { 'container_name': container_name, 'image': image, - 'environment': { - 'KSM_CONFIG': self.setup_result.b64_config, - 'COMMANDER_RECORD': self.setup_result.record_uid, - record_env_key: record_uid - }, - 'depends_on': { - self.commander_service_name: { - 'condition': 'service_healthy' - } - }, - 'restart': 'unless-stopped' } + if ports: + service['ports'] = ports + service['environment'] = { + 'KSM_CONFIG': self.setup_result.b64_config, + 'COMMANDER_RECORD': self.setup_result.record_uid, + record_env_key: record_uid + } + service['depends_on'] = { + self.commander_service_name: { + 'condition': 'service_healthy' + } + } + service['restart'] = 'unless-stopped' + return service def _build_service_command(self) -> None: port = self.config['port'] diff --git a/keepercommander/service/docker/models.py b/keepercommander/service/docker/models.py index 
78ad0a6c6..d601c5581 100644 --- a/keepercommander/service/docker/models.py +++ b/keepercommander/service/docker/models.py @@ -96,6 +96,7 @@ class TeamsConfig: tenant_id: str approvals_channel_id: str approvals_team_id: str + bot_port: int = 3978 pedm_enabled: bool = False pedm_polling_interval: int = 120 device_approval_enabled: bool = False From ad7d7fce7f386c12a7c25ce9cb7dd6370b4a72f9 Mon Sep 17 00:00:00 2001 From: Sergey Kolupaev Date: Wed, 11 Mar 2026 11:40:52 -0700 Subject: [PATCH 15/15] Release 17.2.10 --- keepercommander/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keepercommander/__init__.py b/keepercommander/__init__.py index 73c61cc2a..50f1aa370 100644 --- a/keepercommander/__init__.py +++ b/keepercommander/__init__.py @@ -10,4 +10,4 @@ # Contact: commander@keepersecurity.com # -__version__ = '17.2.9' +__version__ = '17.2.10'