Skip to content

ci(deps): bump trufflesecurity/trufflehog from 3.94.2 to 3.94.3 #636

ci(deps): bump trufflesecurity/trufflehog from 3.94.2 to 3.94.3

ci(deps): bump trufflesecurity/trufflehog from 3.94.2 to 3.94.3 #636

Workflow file for this run

---
name: Performance Testing

# "on" is quoted deliberately: unquoted `on` is a YAML 1.1 boolean key and
# confuses generic parsers/linters.
"on":
  pull_request:
    branches: [master]
  push:
    branches: [master]
  schedule:
    # Run performance tests weekly on Sunday at 23:00 UTC
    - cron: "0 23 * * 0"
  workflow_dispatch:
    inputs:
      benchmark_target:
        description: "Target to benchmark (apps, brews, recommend, outdated, all)"
        required: false
        default: "all"
        type: choice
        options:
          - apps
          - brews
          - recommend
          - outdated
          - all

# One run per ref: a newer push cancels the in-flight run for the same branch.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# Least-privilege default; individual jobs override as needed.
permissions:
  contents: read

env:
  PYTHONUNBUFFERED: 1
  FORCE_COLOR: 1
jobs:
  # Lightweight smoke check so the required "Performance Testing" status is
  # present on PRs (the full suite only runs on schedule/dispatch, see below).
  smoke:
    name: Performance Smoke Check
    runs-on: ubuntu-latest
    permissions: {}
    if: github.event_name == 'pull_request' || github.event_name == 'push'
    steps:
      - name: No-op performance status
        run: |
          echo "Performance tests are scheduled; smoke check passing for PR/push."
performance-test:
name: Performance Testing on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
permissions:
contents: read
# Only run full performance suite on schedule or manual dispatch
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
strategy:
fail-fast: false
matrix:
os: [macos-latest]
python-version: ["3.13"] # Use stable version for consistent results
steps:
- name: Checkout code
uses: actions/checkout@v6
with:
fetch-depth: 0 # Need full history for performance comparison
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: "pip"
- name: Install Homebrew (if not present)
if: runner.os == 'macOS'
run: |
if ! command -v brew &> /dev/null; then
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
- name: Install dependencies
uses: ./.github/actions/setup-python-deps
- name: Make performance scripts executable
run: |
chmod +x scripts/performance_test.py scripts/compare_performance.py
- name: Run performance benchmarks
env:
BENCHMARK_TARGET: ${{ github.event.inputs.benchmark_target || 'all' }}
run: |
python scripts/performance_test.py
- name: Upload performance results
uses: actions/upload-artifact@v7
with:
name: performance-results-${{ matrix.os }}
path: performance_results.json
retention-days: 30
- name: Compare with previous results (if available)
run: |
python scripts/compare_performance.py
- name: Create performance summary
run: |
echo "## Performance Test Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Command | Avg Time (s) | Memory (MB) |" >> $GITHUB_STEP_SUMMARY
echo "|---------|--------------|-------------|" >> $GITHUB_STEP_SUMMARY
if [ -f performance_results.json ]; then
python -c "
import json
with open('performance_results.json') as f:
data = json.load(f)
for name, result in data['results'].items():
if 'error' not in result:
print(f'| {name} | {result[\"avg_time\"]:.2f} | {result[\"avg_memory_mb\"]:.2f} |')
" >> $GITHUB_STEP_SUMMARY
else
echo "| N/A | N/A | N/A |" >> $GITHUB_STEP_SUMMARY
fi
performance-analysis:
name: Performance Analysis
runs-on: ubuntu-latest
needs: performance-test
permissions: {}
# Only analyze when full performance-test runs
if: (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && always()
steps:
- name: Download performance results
uses: actions/download-artifact@v8
with:
pattern: performance-results-*
merge-multiple: true
- name: Analyze performance trends
run: |
echo "=== Performance Analysis ==="
# List all result files
ls -la *.json || echo "No performance result files found"
# Create trend analysis (placeholder for future enhancement)
echo "Future enhancement: Trend analysis across multiple runs"
echo "This job will track performance over time and detect regressions"
performance-testing:
name: Performance Testing
runs-on: ubuntu-latest
needs: [performance-test, performance-analysis]
permissions: {}
# Only report final status when full suite runs
if: (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && always()
steps:
- name: Check performance test status
run: |
echo "Performance test status: ${{ needs.performance-test.result }}"
echo "Performance analysis status: ${{ needs.performance-analysis.result }}"
if [ "${{ needs.performance-test.result }}" != "success" ]; then
echo "Performance test failed"
exit 1
fi
echo "All performance tests completed successfully"