Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
d422327
ref(api): Use file mapping in DIF batch assemble (#110856)
szokeasaurusrex Mar 18, 2026
8d82f14
feat(ai-insights): ai content renderer (#110847)
obostjancic Mar 18, 2026
da111e4
ref(debug_files): Use keys instead of set(keys) (#110967)
szokeasaurusrex Mar 18, 2026
bf3c94b
ref(settings): migrate integration org settings (#110666)
TkDodo Mar 18, 2026
07b3110
fix(insights): Filter SDK update alert by package name (#110968)
priscilawebdev Mar 18, 2026
5ba3380
ref(trace): Remove useIsEAPTraceEnabled hook and non-EAP code paths (…
nsdeschenes Mar 18, 2026
3c0ecfe
feat(native): Log symbolicator response on empty thread list (#110976)
jjbayer Mar 18, 2026
3aba20d
feat(explore): Add drag-and-drop reordering to Visualize toolbar (#11…
JoshuaKGoldberg Mar 18, 2026
c71b9f8
fix(dashboards): Preserve group-by when saving logs query as widget (…
JoshuaKGoldberg Mar 18, 2026
6ab16d9
fix(seer): Handle enum deserialization in autofix tasks (#110959)
sentry[bot] Mar 18, 2026
a0e18f1
fix(tasks) Add shim for CompressionType to instrumented_task (#110863)
markstory Mar 18, 2026
ff9d3d7
fix: Avoid KeyError when reading SDK metadata (#110896)
markstory Mar 18, 2026
adb8caf
fix(dashboards): skip flakey widget builder test (#110980)
nikkikapadia Mar 18, 2026
da964ac
fix(github_copilot): Handle new task API response format (#110970)
JoshFerge Mar 18, 2026
259cabc
feat(text): Add variant="inherit" to Text primitive (#110945)
JonasBa Mar 18, 2026
775a399
feat(text): Add render prop pattern to Text component (#110926)
JonasBa Mar 18, 2026
f7d2a37
feat(dashboards): Adds validation completion hook to generate dashboa…
edwardgou-sentry Mar 18, 2026
8a4f150
ref: migrate default exports in files with other named exports to nam…
JoshuaKGoldberg Mar 18, 2026
9674f80
chore(tracemetrics): Register UI Refresh Feature Flag (#110978)
nsdeschenes Mar 18, 2026
ef1a4af
fix(dashboards): Use normalizeUrl when navigating after pre-built das…
gggritso Mar 18, 2026
9db1be9
ref(dashboards): Remove hard-coded fieldMeta from prebuilt configs (#…
gggritso Mar 18, 2026
872e99c
fix(dashboards): Use firstTransactionEvent for overview onboarding co…
gggritso Mar 18, 2026
7bc241a
fix(dashboards): Prevent long dashboard names from overflowing in lis…
gggritso Mar 18, 2026
27779ae
feat(eslint): expand no-default-exports rule to files with other name…
JoshuaKGoldberg Mar 18, 2026
4c60bdc
ref(dashboards): Update dashboards generation sendMessage to use fetc…
edwardgou-sentry Mar 18, 2026
0b9357c
feat(dashboards): Dashboards generation chat panel improvements (#110…
edwardgou-sentry Mar 18, 2026
847d2f5
feat(dashboards): Track dashboard generation validation success and f…
edwardgou-sentry Mar 18, 2026
e578beb
feat: Add `seer-gitlab-support` feature flag (#110659)
billyvg Mar 18, 2026
ad453d7
fix(dashboards): Disambiguate unaliased filters in widget legends (#1…
gggritso Mar 18, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
70 changes: 26 additions & 44 deletions src/sentry/api/endpoints/debug_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import posixpath
import re
import uuid
from collections.abc import Iterable, Mapping, Sequence
from collections.abc import Iterable, Mapping, Sequence, Set
from typing import TYPE_CHECKING, TypedDict, TypeGuard

import jsonschema
Expand Down Expand Up @@ -422,14 +422,10 @@ def post(self, request: Request, project: Project) -> Response:
return Response({"associatedDsymFiles": []})


def get_file_info(files, checksum):
def get_file_info(file) -> tuple[str | None, str | None, list[str]]:
"""
Extracts file information from files given a checksum.
Extracts file information from one assemble payload.
"""
file = files.get(checksum)
if file is None:
return None

name = file.get("name")
debug_id = file.get("debug_id")
chunks = file.get("chunks", [])
Expand All @@ -441,22 +437,20 @@ def batch_assemble(project, files):
"""
Performs assembling in a batch fashion, issuing queries that span multiple files.
"""
# We build a set of all the checksums that still need checks.
checksums_to_check = {checksum for checksum in files.keys()}
files_to_check = files.copy()
file_response = {}

# 1. Exclude all files that have already an assemble status.
checksums_with_status = set()
for checksum in checksums_to_check:
# 1. Exclude all files that already have an assemble status.
for checksum, file in list(files_to_check.items()):
# First, check the cached assemble status. During assembling, a
# `ProjectDebugFile` will be created, and we need to prevent a race
# condition.
state, detail = get_assemble_status(AssembleTask.DIF, project.id, checksum)
requested_debug_id = _get_requested_debug_id(files[checksum])
requested_debug_id = _get_requested_debug_id(file)
cached_debug_id = detail.get("uuid") if isinstance(detail, Mapping) else None

if state == ChunkFileState.OK and not _is_proguard_reupload_clone_request(
file=files[checksum],
file=file,
requested_debug_id=requested_debug_id,
selected_debug_id=cached_debug_id,
):
Expand All @@ -466,42 +460,41 @@ def batch_assemble(project, files):
"missingChunks": [],
"dif": detail,
}
checksums_with_status.add(checksum)
files_to_check.pop(checksum)
elif state is not None and state != ChunkFileState.OK:
file_response[checksum] = {"state": state, "detail": detail, "missingChunks": []}
checksums_with_status.add(checksum)

checksums_to_check -= checksums_with_status
files_to_check.pop(checksum)

# 2. Check if this project already owns the `ProjectDebugFile` for each file,
# also create ProGuard reupload clones if applicable.
requested_debug_ids_by_checksum = {
checksum: _get_requested_debug_id(files[checksum]) for checksum in checksums_to_check
checksum: _get_requested_debug_id(file) for checksum, file in files_to_check.items()
}
existing_debug_files = _find_existing_debug_files(
project=project,
checksums=checksums_to_check,
checksums=files_to_check.keys(),
requested_debug_ids_by_checksum=requested_debug_ids_by_checksum,
)

for debug_file in existing_debug_files:
checksums_to_check.discard(debug_file.checksum)
requested_debug_id = requested_debug_ids_by_checksum[debug_file.checksum]
checksum = debug_file.checksum
file = files_to_check.pop(checksum)
requested_debug_id = requested_debug_ids_by_checksum[checksum]

if _is_proguard_reupload_clone_request(
requested_debug_id=requested_debug_id,
file=files[debug_file.checksum],
file=file,
selected_debug_id=debug_file.debug_id,
):
file_response[debug_file.checksum] = _clone_proguard_debug_file_for_reupload(
file_response[checksum] = _clone_proguard_debug_file_for_reupload(
project=project,
debug_file=debug_file,
requested_debug_id=requested_debug_id,
is_proguard_clone_source=bool(debug_file.proguard_clone_source_match),
)
continue

file_response[debug_file.checksum] = {
file_response[checksum] = {
"state": ChunkFileState.OK,
"detail": None,
"missingChunks": [],
Expand All @@ -510,26 +503,22 @@ def batch_assemble(project, files):

# 3. Compute all the chunks that have to be checked for existence.
chunks_to_check = {}
checksums_without_chunks = set()
for checksum in checksums_to_check:
file_info = get_file_info(files, checksum)
name, debug_id, chunks = file_info or (None, None, None)
for checksum, file in list(files_to_check.items()):
name, debug_id, chunks = get_file_info(file)

# If we don't have any chunks, this is likely a poll request
# checking for file status, so return NOT_FOUND.
if not chunks:
file_response[checksum] = {"state": ChunkFileState.NOT_FOUND, "missingChunks": []}
checksums_without_chunks.add(checksum)
files_to_check.pop(checksum)
continue

# Map each chunk back to its source file checksum.
for chunk in chunks:
chunks_to_check[chunk] = checksum

checksums_to_check -= checksums_without_chunks

# 4. Find missing chunks and group them per checksum.
all_missing_chunks = find_missing_chunks(project.organization.id, set(chunks_to_check.keys()))
all_missing_chunks = find_missing_chunks(project.organization.id, chunks_to_check.keys())

missing_chunks_per_checksum: dict[str, set[str]] = {}
for chunk in all_missing_chunks:
Expand All @@ -538,29 +527,22 @@ def batch_assemble(project, files):
missing_chunks_per_checksum.setdefault(chunks_to_check[chunk], set()).add(chunk)

# 5. Report missing chunks per checksum.
checksums_with_missing_chunks = set()
for checksum, missing_chunks in missing_chunks_per_checksum.items():
file_response[checksum] = {
"state": ChunkFileState.NOT_FOUND,
"missingChunks": list(missing_chunks),
}
checksums_with_missing_chunks.add(checksum)

checksums_to_check -= checksums_with_missing_chunks
files_to_check.pop(checksum, None)

from sentry.tasks.assemble import assemble_dif

# 6. Kickstart async assembling for all remaining chunks that have passed all checks.
for checksum in checksums_to_check:
file_info = get_file_info(files, checksum)
if file_info is None:
continue

for checksum, file in files_to_check.items():
# We don't have a state yet, this means we can now start an assemble job in the background and mark
# this in the state.
set_assemble_status(AssembleTask.DIF, project.id, checksum, ChunkFileState.CREATED)

name, debug_id, chunks = file_info
name, debug_id, chunks = get_file_info(file)
assemble_dif.apply_async(
kwargs={
"project_id": project.id,
Expand Down Expand Up @@ -619,7 +601,7 @@ class _DebugFileAnnotations(TypedDict):

def _find_existing_debug_files(
project: Project,
checksums: set[str],
checksums: Set[str],
requested_debug_ids_by_checksum: dict[str, str | None],
) -> "QuerySet[WithAnnotations[ProjectDebugFile, _DebugFileAnnotations]]":
"""Find up to one existing `ProjectDebugFile` row per requested checksum.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from sentry.api.base import cell_silo_endpoint
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission
from sentry.dashboards.models.generate_dashboard_artifact import GeneratedDashboard
from sentry.dashboards.on_completion_hook import DashboardOnCompletionHook
from sentry.models.organization import Organization
from sentry.ratelimits.config import RateLimitConfig
from sentry.seer.explorer.client import SeerExplorerClient
Expand Down Expand Up @@ -73,7 +74,9 @@ def post(self, request: Request, organization: Organization) -> Response:
prompt = serializer.validated_data["prompt"]

try:
client = SeerExplorerClient(organization, request.user)
client = SeerExplorerClient(
organization, request.user, on_completion_hook=DashboardOnCompletionHook
)
run_id = client.start_run(
prompt=prompt,
on_page_context="The user is on the dashboard generation page. This session must ONLY generate a dashboard artifact. Do not perform code inspection, code changes, or any tasks unrelated to dashboard generation.",
Expand Down
113 changes: 113 additions & 0 deletions src/sentry/dashboards/on_completion_hook.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
from __future__ import annotations

import logging

from pydantic import ValidationError

from sentry.dashboards.models.generate_dashboard_artifact import GeneratedDashboard
from sentry.models.organization import Organization
from sentry.seer.explorer.client import SeerExplorerClient
from sentry.seer.explorer.client_utils import fetch_run_status
from sentry.seer.explorer.on_completion_hook import ExplorerOnCompletionHook

logger = logging.getLogger(__name__)

# Prefix of the user message sent when asking Seer to regenerate a failed
# artifact; also used to recognize prior fix requests when counting retries.
FIX_PROMPT = "The generated dashboard artifact has validation errors."
# Second sentence of the fix request; the validation error details follow it.
FIX_PROMPT_SECONDARY = "Please fix the following issues and regenerate the dashboard artifact:"

# Cap on consecutive regeneration attempts per user-driven generation, to
# avoid an infinite validate -> fix -> validate loop.
MAX_VALIDATION_RETRIES = 3


class DashboardOnCompletionHook(ExplorerOnCompletionHook):
    """
    Completion hook for dashboard-generation Explorer runs.

    When a run finishes, the generated "dashboard" artifact is validated
    against the GeneratedDashboard Pydantic model. On validation failure the
    hook asks Seer to regenerate the artifact, including the error details in
    the prompt. Because completion hooks persist across continue_run calls,
    regeneration is capped at MAX_VALIDATION_RETRIES consecutive attempts so
    a persistently-broken artifact cannot loop forever.
    """

    @classmethod
    def execute(cls, organization: Organization, run_id: int) -> None:
        """Validate the run's dashboard artifact and request a fix if needed."""
        try:
            run_state = fetch_run_status(run_id, organization)
        except Exception:
            logger.exception(
                "dashboards.on_completion_hook.fetch_state_failed",
                extra={"run_id": run_id, "organization_id": organization.id},
            )
            return

        # Only act on runs that actually finished.
        if run_state.status != "completed":
            return

        log_extra = {"run_id": run_id, "organization_id": organization.id}

        try:
            dashboard = run_state.get_artifact("dashboard", GeneratedDashboard)
        except ValidationError as err:
            logger.info(
                "dashboards.on_completion_hook.validation_failed",
                extra={
                    "run_id": run_id,
                    "organization_id": organization.id,
                },
            )

            attempts = cls._count_fix_attempts(run_state)
            if attempts >= MAX_VALIDATION_RETRIES:
                logger.info(
                    "dashboards.on_completion_hook.max_retries_reached",
                    extra={
                        "run_id": run_id,
                        "organization_id": organization.id,
                        "retry_count": attempts,
                    },
                )
                return

            cls._request_fix(organization, run_id, err)
            return

        if dashboard is None:
            logger.warning(
                "dashboards.on_completion_hook.no_artifact",
                extra=log_extra,
            )
            return

        logger.info(
            "dashboards.on_completion_hook.validation_passed",
            extra=log_extra,
        )

    @staticmethod
    def _count_fix_attempts(run_state) -> int:
        """
        Count consecutive fix requests in the current failure chain.

        Blocks are scanned newest-first; every user message that starts with
        FIX_PROMPT counts as one attempt, and any other user message ends the
        chain so each user-driven generation gets a fresh retry budget.
        Non-user (assistant/system) blocks are ignored.
        """
        attempts = 0
        for block in reversed(run_state.blocks):
            message = block.message
            if message.role != "user":
                continue
            if message.content and message.content.startswith(FIX_PROMPT):
                attempts += 1
            else:
                break
        return attempts

    @classmethod
    def _request_fix(cls, organization: Organization, run_id: int, error: ValidationError) -> None:
        """Ask Seer to regenerate the dashboard artifact with the error details."""
        try:
            client = SeerExplorerClient(organization=organization, user=None)
            # We only request a single regeneration. No further generation
            # requests are made if this fails.
            client.continue_run(
                run_id,
                prompt=f"{FIX_PROMPT} {FIX_PROMPT_SECONDARY}\n\n{error}",
                artifact_key="dashboard",
                artifact_schema=GeneratedDashboard,
            )
        except Exception:
            logger.exception(
                "dashboards.on_completion_hook.continue_run_failed",
                extra={"run_id": run_id, "organization_id": organization.id},
            )
3 changes: 2 additions & 1 deletion src/sentry/debug_files/upload.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from collections.abc import Set
from datetime import timedelta

import sentry_sdk
Expand All @@ -6,7 +7,7 @@
from sentry.models.files import FileBlob


def find_missing_chunks(organization_id: int, chunks: set[str]) -> list[str]:
def find_missing_chunks(organization_id: int, chunks: Set[str]) -> list[str]:
"""Returns a list of chunks which are missing for an org."""
with sentry_sdk.start_span(op="find_missing_chunks") as span:
span.set_tag("organization_id", organization_id)
Expand Down
4 changes: 4 additions & 0 deletions src/sentry/features/temporary.py
Original file line number Diff line number Diff line change
Expand Up @@ -321,6 +321,8 @@ def register_temporary_features(manager: FeatureManager) -> None:
manager.add("organizations:seer-autopilot", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
# Disables the enableSeerCoding setting, preventing orgs from changing code generation behavior
manager.add("organizations:seer-disable-coding-setting", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
# Enable GitLab as a supported SCM provider for Seer
manager.add("organizations:seer-gitlab-support", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
# Disable select orgs from ingesting mobile replay events.
manager.add("organizations:session-replay-video-disabled", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False)
# Enable data scrubbing of replay recording payloads in Relay.
Expand Down Expand Up @@ -453,6 +455,8 @@ def register_temporary_features(manager: FeatureManager) -> None:
manager.add("organizations:tracemetrics-overlay-charts-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
# Enable trace metrics units in trace view UI
manager.add("organizations:tracemetrics-units-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
# Enable trace metrics UI refresh
manager.add("organizations:tracemetrics-ui-refresh", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
# Enable traces page cross event querying
manager.add("organizations:traces-page-cross-event-querying", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
# Enable overlaying charts in traces
Expand Down
7 changes: 5 additions & 2 deletions src/sentry/integrations/github_copilot/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,8 +98,11 @@ def launch(self, *, webhook_url: str, request: CodingAgentLaunchRequest) -> Codi
},
)

task_response = GithubCopilotTaskResponse.validate(api_response.json)
task = task_response.task
response_json = api_response.json
if isinstance(response_json, dict) and "task" in response_json:
task = GithubCopilotTaskResponse.validate(response_json).task
else:
task = GithubCopilotTask.validate(response_json)

agent_id = self.encode_agent_id(owner, repo, task.id)

Expand Down
1 change: 1 addition & 0 deletions src/sentry/lang/native/processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,7 @@ def _merge_full_response(data, response):
if response["stacktraces"]:
data["threads"] = {"values": data_threads}
else:
logger.info("minidump has no thread list", extra={"response": response})
error = SymbolicationFailed(
message="minidump has no thread list", type=EventErrorType.NATIVE_SYMBOLICATOR_FAILED
)
Expand Down
2 changes: 1 addition & 1 deletion src/sentry/sdk_updates.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,7 +381,7 @@ def get_sdk_versions():
def get_sdk_urls():
try:
rv = dict(settings.SDK_URLS)
rv.update((key, info["main_docs_url"]) for (key, info) in get_sdk_index().items())
rv.update((key, info.get("main_docs_url", "")) for (key, info) in get_sdk_index().items())
return rv
except Exception:
logger.exception("sentry-release-registry.sdk-urls")
Expand Down
4 changes: 2 additions & 2 deletions src/sentry/seer/autofix/autofix.py
Original file line number Diff line number Diff line change
Expand Up @@ -458,11 +458,11 @@ def _call_autofix(
"options": {
"comment_on_pr_with_url": pr_to_comment_on_url,
"auto_run_source": auto_run_source,
"referrer": referrer,
"referrer": referrer.value,
"disable_coding_step": not group.organization.get_option(
"sentry:enable_seer_coding", default=ENABLE_SEER_CODING_DEFAULT
),
"stopping_point": stopping_point,
"stopping_point": stopping_point.value if stopping_point else None,
},
},
option=orjson.OPT_NON_STR_KEYS,
Expand Down
Loading
Loading