From 618f24f298124259406a52883a9dea431a277157 Mon Sep 17 00:00:00 2001 From: Albert Martinez <58224660+albert-mr@users.noreply.github.com> Date: Fri, 27 Mar 2026 11:06:12 +0100 Subject: [PATCH 1/6] fix: prevent migration crash when seed users don't exist The seed_featured_content migration (0037) assumed specific users always exist, causing crashes in test environments. Added try/except to skip seeding gracefully. Co-Authored-By: Claude Opus 4.6 --- .../migrations/0037_seed_featured_content.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/backend/contributions/migrations/0037_seed_featured_content.py b/backend/contributions/migrations/0037_seed_featured_content.py index 922ace6a..62ea0e96 100644 --- a/backend/contributions/migrations/0037_seed_featured_content.py +++ b/backend/contributions/migrations/0037_seed_featured_content.py @@ -5,8 +5,12 @@ def seed_featured_content(apps, schema_editor): User = apps.get_model('users', 'User') FeaturedContent = apps.get_model('contributions', 'FeaturedContent') - albert = User.objects.get(email='albert@genlayer.foundation') # cognocracy - ivan = User.objects.get(email='ivan@genlayer.foundation') # raskovsky + try: + albert = User.objects.get(email='albert@genlayer.foundation') # cognocracy + ivan = User.objects.get(email='ivan@genlayer.foundation') # raskovsky + except User.DoesNotExist: + # Skip seeding in test environments where these users don't exist + return FeaturedContent.objects.create( content_type='hero', From 5f59675858dcbcf5b0062748c74dd64a2a880dc6 Mon Sep 17 00:00:00 2001 From: Albert Martinez <58224660+albert-mr@users.noreply.github.com> Date: Fri, 27 Mar 2026 11:06:26 +0100 Subject: [PATCH 2/6] feat: rewrite daily uptime command with date range support and bulk operations Major refactor of the add_daily_uptime management command: - Add --date, --start-date, --end-date params for backfilling historical data - Refactor from per-user DB saves to bulk_create with batch processing - Add 
snapshot-based activity verification with fallback for recent dates - Wrap all operations in atomic transaction for data consistency - Use shared leaderboard helpers (update_user_leaderboard_entries, update_referrer_points) instead of manual leaderboard entry management - Raise CommandError on complete failure to surface to HTTP callers - Comprehensive test suite (16 tests) covering all command paths Database migration note: This command requires the "Uptime" ContributionType to exist. Run the daily uptime workflow after deploying, or manually via: python manage.py add_daily_uptime --verbose Co-Authored-By: Claude Opus 4.6 --- .../management/commands/add_daily_uptime.py | 462 ++++++++++------ .../tests/test_add_daily_uptime.py | 499 ++++++++++-------- 2 files changed, 584 insertions(+), 377 deletions(-) diff --git a/backend/contributions/management/commands/add_daily_uptime.py b/backend/contributions/management/commands/add_daily_uptime.py index 098e910e..4a6089d0 100644 --- a/backend/contributions/management/commands/add_daily_uptime.py +++ b/backend/contributions/management/commands/add_daily_uptime.py @@ -1,16 +1,19 @@ -from django.core.management.base import BaseCommand +from django.core.management.base import BaseCommand, CommandError from django.utils import timezone from django.db import transaction from django.contrib.auth import get_user_model -from contributions.models import Contribution, ContributionType, Category -from leaderboard.models import GlobalLeaderboardMultiplier, update_all_ranks, LeaderboardEntry +from contributions.models import Contribution, ContributionType +from leaderboard.models import GlobalLeaderboardMultiplier, update_all_ranks, update_user_leaderboard_entries, update_referrer_points from django.db.models import Q from datetime import datetime, timedelta import pytz import decimal +MAX_DATE_RANGE_DAYS = 366 + User = get_user_model() + class Command(BaseCommand): help = 'Adds daily uptime contributions for validators with active 
wallets' @@ -42,61 +45,182 @@ def add_arguments(self, parser): choices=['asimov', 'bradbury'], help='Only process a specific network (default: all networks)' ) + parser.add_argument( + '--date', + type=str, + help='Process a specific date (YYYY-MM-DD) instead of today' + ) + parser.add_argument( + '--start-date', + type=str, + help='Start date for range processing (YYYY-MM-DD, inclusive)' + ) + parser.add_argument( + '--end-date', + type=str, + help='End date for range processing (YYYY-MM-DD, inclusive)' + ) - def handle(self, *args, **options): - from validators.models import ValidatorWallet, Validator, ValidatorWalletStatusSnapshot - - dry_run = options['dry_run'] - verbose = options['verbose'] - points = options['points'] - force = options['force'] - network_filter = options.get('network') - - if dry_run: - self.stdout.write(self.style.WARNING('DRY RUN - No changes will be made')) - - self.stdout.write(self.style.SUCCESS('Starting daily uptime generation...')) - - # Get the uptime contribution type + def _parse_date(self, date_str): + """Parse a YYYY-MM-DD string into a date object.""" try: - uptime_type = ContributionType.objects.get(name='Uptime') - self.stdout.write(f'Found uptime contribution type: {uptime_type.name} (ID: {uptime_type.id})') - except ContributionType.DoesNotExist: - self.stdout.write(self.style.ERROR('Error: Uptime contribution type not found')) - return - - # Determine which networks to process - from django.conf import settings as django_settings - networks_to_process = [network_filter] if network_filter else list(django_settings.TESTNET_NETWORKS.keys()) - lookback_days = django_settings.UPTIME_LOOKBACK_DAYS - - self.stdout.write(f'Networks to process: {networks_to_process}') - self.stdout.write(f'Lookback window: {lookback_days} days') - - # Track stats - total_users = 0 - total_new_contributions = 0 - multiplier_errors = 0 - users_to_update_leaderboard = [] + return datetime.strptime(date_str, '%Y-%m-%d').date() + except ValueError: + 
raise ValueError(f'Invalid date format: {date_str}. Use YYYY-MM-DD.') + + def _get_dates_to_process(self, options): + """Determine which dates to process based on command options.""" + single_date = options.get('date') + start_date = options.get('start_date') + end_date = options.get('end_date') + + if single_date: + if start_date or end_date: + raise ValueError('Cannot use --date with --start-date or --end-date') + return [self._parse_date(single_date)] + + if start_date and end_date: + start = self._parse_date(start_date) + end = self._parse_date(end_date) + if start > end: + raise ValueError(f'Start date {start} is after end date {end}') + if (end - start).days > MAX_DATE_RANGE_DAYS: + raise ValueError( + f'Date range exceeds {MAX_DATE_RANGE_DAYS} days. ' + f'Process in smaller batches to avoid memory issues.' + ) + dates = [] + current = start + while current <= end: + dates.append(current) + current += timedelta(days=1) + return dates + + if start_date or end_date: + raise ValueError('Both --start-date and --end-date are required for range processing') + + # Default: today + return [timezone.now().date()] + + def _check_active_in_window(self, network_wallets, target_date, lookback_days, ValidatorWalletStatusSnapshot): + """ + Check if any wallet was active in the lookback window. + Falls back to current wallet status if no snapshots exist for the target date, + but only for recent dates (today or yesterday) to avoid granting incorrect + historical points when current status differs from past status. + Returns (is_active, used_fallback). 
+ """ + lookback_start = target_date - timedelta(days=lookback_days) + + # Check snapshots in the lookback window + has_active_in_window = ValidatorWalletStatusSnapshot.objects.filter( + wallet__in=network_wallets, + date__gte=lookback_start, + date__lte=target_date, + status='active' + ).exists() + + if has_active_in_window: + return True, False + + # Fallback: only for recent dates (today/yesterday) where sync may have been down. + # For historical backfills, current wallet status may not reflect past status. + today = timezone.now().date() + is_recent = (today - target_date).days <= 1 + + if is_recent: + has_any_snapshot = ValidatorWalletStatusSnapshot.objects.filter( + wallet__in=network_wallets, + date=target_date + ).exists() + + if not has_any_snapshot: + has_active_wallet = network_wallets.filter(status='active').exists() + if has_active_wallet: + return True, True # Active via fallback + + return False, False + + def _check_existing_contribution(self, user, uptime_type, target_date, network): + """Check if an uptime contribution already exists for this user/date/network.""" + date_contributions = Contribution.objects.filter( + user=user, + contribution_type=uptime_type, + contribution_date__date=target_date, + ) + if network == 'asimov': + # Match (asimov) or legacy contributions without any network marker + return date_contributions.filter( + Q(notes__contains='(asimov)') | + (~Q(notes__contains='(asimov)') & ~Q(notes__contains='(bradbury)')) + ).exists() + else: + return date_contributions.filter( + notes__contains=f'({network})' + ).exists() + + def _get_multiplier(self, uptime_type, target_date, force): + """ + Get the multiplier value for the given date. + Returns (multiplier_value, error_message). 
+ """ + contribution_datetime = datetime.combine( + target_date, + datetime.min.time(), + tzinfo=pytz.UTC + ) + try: + _, multiplier_value = GlobalLeaderboardMultiplier.get_active_for_type( + uptime_type, + at_date=contribution_datetime + ) + return multiplier_value, None + except GlobalLeaderboardMultiplier.DoesNotExist: + if force: + return decimal.Decimal('1.0'), 'fallback' + return None, 'missing' + + def process_date(self, target_date, uptime_type, validators, networks_to_process, + lookback_days, points, force, dry_run, verbose, + ValidatorWallet, ValidatorWalletStatusSnapshot): + """ + Process a single date: create uptime contributions for eligible validators. + Returns (contributions_to_create, users_affected, network_stats, multiplier_errors). + """ + contributions_to_create = [] + users_affected = set() network_stats = {n: 0 for n in networks_to_process} + multiplier_errors = 0 - # Current date - today = timezone.now().date() - lookback_start = today - timedelta(days=lookback_days) + # Cache multiplier per date (same for all users on same date) + multiplier_value, mult_error = self._get_multiplier(uptime_type, target_date, force) - # Get all validators with linked wallets - validators = Validator.objects.filter( - validator_wallets__isnull=False - ).distinct().select_related('user') - total_users = validators.count() + if mult_error == 'missing': + self.stdout.write( + self.style.ERROR( + f' {target_date}: No multiplier found for "Uptime". Use --force to override.' 
+ ) + ) + return contributions_to_create, users_affected, network_stats, 1 - self.stdout.write(f'Found {total_users} validators with linked wallets') + if mult_error == 'fallback': + self.stdout.write( + self.style.WARNING( + f' {target_date}: No multiplier found, using default of 1.0 (--force enabled)' + ) + ) + + frozen_global_points = round(points * float(multiplier_value)) + contribution_datetime = datetime.combine( + target_date, + datetime.min.time(), + tzinfo=pytz.UTC + ) for validator in validators: user = validator.user for network in networks_to_process: - # Check if user has wallets on this network network_wallets = ValidatorWallet.objects.filter( operator=validator, network=network @@ -107,153 +231,161 @@ def handle(self, *args, **options): self.stdout.write(f' {user}: No wallets on {network}, skipping') continue - # Check if ANY wallet was active in the lookback window - has_active_in_window = ValidatorWalletStatusSnapshot.objects.filter( - wallet__in=network_wallets, - date__gte=lookback_start, - date__lte=today, - status='active' - ).exists() + # Check activity in lookback window + is_active, used_fallback = self._check_active_in_window( + network_wallets, target_date, lookback_days, ValidatorWalletStatusSnapshot + ) - if not has_active_in_window: + if not is_active: if verbose: self.stdout.write(f' {user}: No active wallets on {network} in lookback window, skipping') continue - # Check for existing uptime contribution for this user/date/network - # For asimov, also match legacy contributions without any network marker - # (old format: "Auto-generated daily uptime for 2025-12-01") - today_contributions = Contribution.objects.filter( - user=user, - contribution_type=uptime_type, - contribution_date__date=today, - ) - if network == 'asimov': - existing = today_contributions.filter( - Q(notes__contains='(asimov)') | - (~Q(notes__contains='(asimov)') & ~Q(notes__contains='(bradbury)')) - ).exists() - else: - existing = today_contributions.filter( - 
notes__contains=f'({network})' - ).exists() - - if existing: - if verbose: - self.stdout.write(f' {user}: Uptime for {network} on {today} already exists, skipping') - continue - - # Get the active multiplier for today - contribution_date = datetime.combine( - today, - datetime.min.time(), - tzinfo=pytz.UTC - ) - - try: - _, multiplier_value = GlobalLeaderboardMultiplier.get_active_for_type( - uptime_type, - at_date=contribution_date - ) - except GlobalLeaderboardMultiplier.DoesNotExist: - if force: - multiplier_value = decimal.Decimal('1.0') - self.stdout.write( - self.style.WARNING( - f' {today}: No multiplier found, using default of 1.0 (--force enabled)' - ) - ) - else: - multiplier_errors += 1 - self.stdout.write( - self.style.ERROR( - f' {today}: No multiplier found for "Uptime". Use --force to override.' - ) + if used_fallback: + self.stdout.write( + self.style.WARNING( + f' {user}: Using current wallet status as fallback for {network} on {target_date} (no snapshots found)' ) - continue + ) - frozen_global_points = round(points * float(multiplier_value)) + # Check for existing contribution + if self._check_existing_contribution(user, uptime_type, target_date, network): + if verbose: + self.stdout.write(f' {user}: Uptime for {network} on {target_date} already exists, skipping') + continue if verbose: self.stdout.write( - f' {user}: Adding uptime for {network} on {today} ' + f' {user}: Adding uptime for {network} on {target_date} ' f'({points} points x {multiplier_value} = {frozen_global_points} global points)' ) - if not dry_run: - Contribution.objects.create( - user=user, - contribution_type=uptime_type, - points=points, - contribution_date=contribution_date, - multiplier_at_creation=multiplier_value, - frozen_global_points=frozen_global_points, - notes=f'Auto-generated daily uptime for {today} ({network})' - ) + contributions_to_create.append(Contribution( + user=user, + contribution_type=uptime_type, + points=points, + 
contribution_date=contribution_datetime, + multiplier_at_creation=multiplier_value, + frozen_global_points=frozen_global_points, + notes=f'Auto-generated daily uptime for {target_date} ({network})' + )) + users_affected.add(user) + network_stats[network] += 1 - if user not in users_to_update_leaderboard: - users_to_update_leaderboard.append(user) + return contributions_to_create, users_affected, network_stats, multiplier_errors - total_new_contributions += 1 - network_stats[network] += 1 + def handle(self, *args, **options): + from validators.models import ValidatorWallet, Validator, ValidatorWalletStatusSnapshot + + dry_run = options['dry_run'] + verbose = options['verbose'] + points = options['points'] + force = options['force'] + network_filter = options.get('network') + + # Parse dates + try: + dates_to_process = self._get_dates_to_process(options) + except ValueError as e: + self.stdout.write(self.style.ERROR(str(e))) + return - # Update leaderboard entries for all affected users - if users_to_update_leaderboard and not dry_run: - self.stdout.write('Updating leaderboard entries...') + if dry_run: + self.stdout.write(self.style.WARNING('DRY RUN - No changes will be made')) - uptime_category = uptime_type.category if uptime_type.category else None + self.stdout.write(self.style.SUCCESS('Starting daily uptime generation...')) - for user in users_to_update_leaderboard: - # Update GLOBAL leaderboard entry - global_entry, created = LeaderboardEntry.objects.get_or_create( - user=user, - category=None - ) - global_points = global_entry.update_points_without_ranking() + # Get the uptime contribution type + try: + uptime_type = ContributionType.objects.get(name='Uptime') + self.stdout.write(f'Found uptime contribution type: {uptime_type.name} (ID: {uptime_type.id})') + except ContributionType.DoesNotExist: + raise CommandError('Uptime contribution type not found') - if verbose: - action = 'Created' if created else 'Updated' - self.stdout.write(f'{action} GLOBAL 
leaderboard for {user}: {global_points} total points') - - # Update CATEGORY-SPECIFIC leaderboard entry - if uptime_category: - category_entry, cat_created = LeaderboardEntry.objects.get_or_create( - user=user, - category=uptime_category - ) - category_points = category_entry.update_points_without_ranking() + # Determine which networks to process + from django.conf import settings as django_settings + networks_to_process = [network_filter] if network_filter else list(django_settings.TESTNET_NETWORKS.keys()) + lookback_days = django_settings.UPTIME_LOOKBACK_DAYS - if verbose: - action = 'Created' if cat_created else 'Updated' - self.stdout.write(f'{action} {uptime_category.name} category leaderboard for {user}: {category_points} points') - - # Update entries for ALL categories this user has contributions in - user_categories = Category.objects.filter( - contribution_types__contributions__user=user - ).distinct() - - for category in user_categories: - if category != uptime_category: - cat_entry, cat_created = LeaderboardEntry.objects.get_or_create( - user=user, - category=category - ) - cat_points = cat_entry.update_points_without_ranking() + self.stdout.write(f'Networks to process: {networks_to_process}') + self.stdout.write(f'Lookback window: {lookback_days} days') + self.stdout.write(f'Dates to process: {dates_to_process[0]} to {dates_to_process[-1]} ({len(dates_to_process)} days)') - if verbose and cat_created: - self.stdout.write(f'Created {category.name} category leaderboard for {user}: {cat_points} points') + # Get all validators with linked wallets + validators = list(Validator.objects.filter( + validator_wallets__isnull=False + ).distinct().select_related('user')) + total_users = len(validators) - self.stdout.write('Updating all leaderboard ranks...') - update_all_ranks() + self.stdout.write(f'Found {total_users} validators with linked wallets') + + # Track global stats + total_new_contributions = 0 + total_multiplier_errors = 0 + all_users_affected = set() 
+ all_contributions_to_create = [] + total_network_stats = {n: 0 for n in networks_to_process} + + for target_date in dates_to_process: + contributions, users, network_stats, mult_errors = self.process_date( + target_date=target_date, + uptime_type=uptime_type, + validators=validators, + networks_to_process=networks_to_process, + lookback_days=lookback_days, + points=points, + force=force, + dry_run=dry_run, + verbose=verbose, + ValidatorWallet=ValidatorWallet, + ValidatorWalletStatusSnapshot=ValidatorWalletStatusSnapshot, + ) + all_contributions_to_create.extend(contributions) + all_users_affected.update(users) + total_multiplier_errors += mult_errors + for n in networks_to_process: + total_network_stats[n] += network_stats.get(n, 0) + + total_new_contributions = len(all_contributions_to_create) + + # Bulk create contributions (skips save()/clean() and signals for performance) + # Wrapped in a transaction so partial failures don't leave stale leaderboard state + if all_contributions_to_create and not dry_run: + with transaction.atomic(): + self.stdout.write(f'Creating {total_new_contributions} contributions...') + Contribution.objects.bulk_create(all_contributions_to_create, batch_size=500) + + # Update leaderboard for all affected users + self.stdout.write(f'Updating leaderboard for {len(all_users_affected)} users...') + for user in all_users_affected: + update_user_leaderboard_entries(user) + + # Update referral points (bulk_create skips post_save signal) + referrers_updated = set() + for contribution in all_contributions_to_create: + if hasattr(contribution.user, 'referred_by') and contribution.user.referred_by: + if contribution.user.referred_by_id not in referrers_updated: + update_referrer_points(contribution) + referrers_updated.add(contribution.user.referred_by_id) + + self.stdout.write('Updating all leaderboard ranks...') + update_all_ranks() # Print summary self.stdout.write(self.style.SUCCESS( f'Daily uptime generation completed!\n' f'- Validators with 
wallets: {total_users}\n' f'- New uptime contributions added: {total_new_contributions}\n' - f'- Per-network breakdown: {network_stats}\n' - f'- Dates skipped due to missing multipliers: {multiplier_errors}' + f'- Per-network breakdown: {total_network_stats}\n' + f'- Dates skipped due to missing multipliers: {total_multiplier_errors}' )) if dry_run: self.stdout.write(self.style.WARNING('DRY RUN - No changes were made')) + + # Raise error if all dates failed due to missing multipliers (surfaces to HTTP endpoint) + if total_multiplier_errors > 0 and total_new_contributions == 0 and not dry_run: + raise CommandError( + f'No contributions created — all {total_multiplier_errors} date(s) ' + f'skipped due to missing multipliers. Use --force to override.' + ) diff --git a/backend/contributions/tests/test_add_daily_uptime.py b/backend/contributions/tests/test_add_daily_uptime.py index 3730a900..010dfd30 100644 --- a/backend/contributions/tests/test_add_daily_uptime.py +++ b/backend/contributions/tests/test_add_daily_uptime.py @@ -1,31 +1,50 @@ from django.test import TestCase from django.core.management import call_command +from django.core.management.base import CommandError from django.utils import timezone from django.contrib.auth import get_user_model -from datetime import datetime, timedelta -import pytz +from datetime import timedelta from io import StringIO from decimal import Decimal -from contributions.models import Contribution, ContributionType +from contributions.models import Contribution, ContributionType, Category from leaderboard.models import LeaderboardEntry, GlobalLeaderboardMultiplier +from validators.models import Validator, ValidatorWallet, ValidatorWalletStatusSnapshot User = get_user_model() class AddDailyUptimeCommandTest(TestCase): """Test the add_daily_uptime management command.""" - + def setUp(self): - """Set up test data.""" - # Create uptime contribution type - self.uptime_type = ContributionType.objects.create( + """Set up test data with 
proper Validator/Wallet/Snapshot fixtures.""" + # Get or create validator category (may exist from migrations) + self.validator_category, _ = Category.objects.get_or_create( + slug='validator', + defaults={ + 'name': 'Validator', + 'profile_model': 'validators.Validator' + } + ) + + # Get or create uptime contribution type + self.uptime_type, _ = ContributionType.objects.get_or_create( name='Uptime', - description='Daily validator uptime', - min_points=1, - max_points=10 + defaults={ + 'slug': 'uptime', + 'description': 'Daily validator uptime', + 'category': self.validator_category, + 'min_points': 1, + 'max_points': 10 + } ) - + # Ensure the type has proper settings for tests + self.uptime_type.min_points = 1 + self.uptime_type.max_points = 10 + self.uptime_type.category = self.validator_category + self.uptime_type.save() + # Create a multiplier for uptime self.multiplier = GlobalLeaderboardMultiplier.objects.create( contribution_type=self.uptime_type, @@ -33,7 +52,7 @@ def setUp(self): valid_from=timezone.now() - timedelta(days=30), description='Default uptime multiplier' ) - + # Create test users self.user1 = User.objects.create_user( email='validator1@test.com', @@ -41,240 +60,296 @@ def setUp(self): name='Validator 1', address='0x1234567890123456789012345678901234567890' ) - self.user2 = User.objects.create_user( email='validator2@test.com', password='testpass123', name='Validator 2', address='0xabcdefabcdefabcdefabcdefabcdefabcdefabcd' ) - - self.user_without_uptime = User.objects.create_user( + self.user_no_validator = User.objects.create_user( email='novalidator@test.com', password='testpass123', name='Not a Validator', address='0x9999999999999999999999999999999999999999' ) - - def test_command_creates_daily_uptime_contributions(self): - """Test that the command creates daily uptime contributions from first uptime to today.""" - # Create an initial uptime contribution 5 days ago - five_days_ago = timezone.now() - timedelta(days=5) - initial_contribution = 
Contribution.objects.create( - user=self.user1, - contribution_type=self.uptime_type, - points=1, - contribution_date=five_days_ago, - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 + + # Create Validator profiles + self.validator1 = Validator.objects.create(user=self.user1) + self.validator2 = Validator.objects.create(user=self.user2) + + # Create wallets on asimov for validator1 + self.wallet1_asimov = ValidatorWallet.objects.create( + address='0xwallet1asimov', + network='asimov', + status='active', + operator=self.validator1, + operator_address='0x1234567890123456789012345678901234567890' ) - - # Run the command - out = StringIO() - call_command('add_daily_uptime', stdout=out, verbosity=2) - - # Check that contributions were created for each day + + # Create wallets on bradbury for validator1 + self.wallet1_bradbury = ValidatorWallet.objects.create( + address='0xwallet1bradbury', + network='bradbury', + status='active', + operator=self.validator1, + operator_address='0x1234567890123456789012345678901234567890' + ) + + # Create wallet on asimov for validator2 + self.wallet2_asimov = ValidatorWallet.objects.create( + address='0xwallet2asimov', + network='asimov', + status='active', + operator=self.validator2, + operator_address='0xabcdefabcdefabcdefabcdefabcdefabcdefabcd' + ) + + # Create active snapshots for today + today = timezone.now().date() + ValidatorWalletStatusSnapshot.objects.create( + wallet=self.wallet1_asimov, date=today, status='active' + ) + ValidatorWalletStatusSnapshot.objects.create( + wallet=self.wallet1_bradbury, date=today, status='active' + ) + ValidatorWalletStatusSnapshot.objects.create( + wallet=self.wallet2_asimov, date=today, status='active' + ) + + def test_creates_uptime_for_active_validator(self): + """Active validator with snapshots gets 1 contribution per network.""" + call_command('add_daily_uptime', verbosity=0) + + # Validator1 has wallets on both networks → 2 contributions contributions = 
Contribution.objects.filter( - user=self.user1, - contribution_type=self.uptime_type - ).count() - - # Should have 6 contributions (initial + 5 days up to today) - self.assertGreaterEqual(contributions, 6) - - # Verify output - output = out.getvalue() - self.assertIn('Daily uptime generation completed!', output) - self.assertIn('Users with uptime: 1', output) - - def test_leaderboard_updates_correctly(self): - """Test that the leaderboard is updated with correct total points.""" - # Create initial uptime 3 days ago - three_days_ago = timezone.now() - timedelta(days=3) - Contribution.objects.create( - user=self.user1, - contribution_type=self.uptime_type, - points=1, - contribution_date=three_days_ago, - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 + user=self.user1, contribution_type=self.uptime_type + ) + self.assertEqual(contributions.count(), 2) + + # Check both networks are present + notes = set(contributions.values_list('notes', flat=True)) + today = timezone.now().date() + self.assertIn(f'Auto-generated daily uptime for {today} (asimov)', notes) + self.assertIn(f'Auto-generated daily uptime for {today} (bradbury)', notes) + + def test_skips_inactive_validator(self): + """Validator with only inactive snapshots gets no contributions.""" + # Change all snapshots to inactive + ValidatorWalletStatusSnapshot.objects.filter( + wallet=self.wallet2_asimov + ).update(status='inactive') + + call_command('add_daily_uptime', verbosity=0) + + contributions = Contribution.objects.filter( + user=self.user2, contribution_type=self.uptime_type ) - - # Run the command + self.assertEqual(contributions.count(), 0) + + def test_no_duplicate_on_rerun(self): + """Running the command twice doesn't create duplicate contributions.""" call_command('add_daily_uptime', verbosity=0) - - # Check leaderboard entry - leaderboard_entry = LeaderboardEntry.objects.get(user=self.user1) - - # Should have 4 days worth of points (3 days + today) * 2 points each = 8 points - 
self.assertGreaterEqual(leaderboard_entry.total_points, 8) - - def test_multiple_users_with_uptime(self): - """Test that multiple users get their uptime updated correctly.""" - # Create initial uptimes for both users - two_days_ago = timezone.now() - timedelta(days=2) - - Contribution.objects.create( + first_count = Contribution.objects.filter(contribution_type=self.uptime_type).count() + + call_command('add_daily_uptime', verbosity=0) + second_count = Contribution.objects.filter(contribution_type=self.uptime_type).count() + + self.assertEqual(first_count, second_count) + + def test_date_range_backfill(self): + """--start-date and --end-date create contributions for each date in range.""" + today = timezone.now().date() + start = today - timedelta(days=2) + end = today + + # Create snapshots for past dates too + for d in [start, start + timedelta(days=1)]: + ValidatorWalletStatusSnapshot.objects.create( + wallet=self.wallet1_asimov, date=d, status='active' + ) + + call_command( + 'add_daily_uptime', + start_date=start.isoformat(), + end_date=end.isoformat(), + verbosity=0 + ) + + # Validator1 has asimov wallet → 3 days × 1 contribution (asimov only for date range) + # Plus bradbury for dates where snapshots exist (only today) + asimov_contributions = Contribution.objects.filter( user=self.user1, contribution_type=self.uptime_type, - points=1, - contribution_date=two_days_ago, - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 + notes__contains='(asimov)' + ) + self.assertEqual(asimov_contributions.count(), 3) + + def test_single_date_param(self): + """--date processes only that specific date.""" + today = timezone.now().date() + yesterday = today - timedelta(days=1) + + # Create snapshot for yesterday + ValidatorWalletStatusSnapshot.objects.create( + wallet=self.wallet1_asimov, date=yesterday, status='active' ) - - Contribution.objects.create( - user=self.user2, + + call_command('add_daily_uptime', date=yesterday.isoformat(), verbosity=0) + + # 
Should create contributions for yesterday only + contributions = Contribution.objects.filter( + user=self.user1, contribution_type=self.uptime_type, - points=1, - contribution_date=two_days_ago - timedelta(days=1), # User2 started earlier - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 + contribution_date__date=yesterday, ) - - # Run the command - call_command('add_daily_uptime', verbosity=0) - - # Check both users have leaderboard entries - entry1 = LeaderboardEntry.objects.get(user=self.user1) - entry2 = LeaderboardEntry.objects.get(user=self.user2) - - # User1 should have at least 3 days of uptime (2 days ago + 1 day + today) - self.assertGreaterEqual(entry1.total_points, 6) - - # User2 should have more points since they started earlier - self.assertGreater(entry2.total_points, entry1.total_points) - - def test_no_duplicate_contributions(self): - """Test that running the command multiple times doesn't create duplicates.""" - # Create initial uptime - yesterday = timezone.now() - timedelta(days=1) - Contribution.objects.create( + self.assertTrue(contributions.exists()) + + # Should NOT create contributions for today + today_contributions = Contribution.objects.filter( user=self.user1, contribution_type=self.uptime_type, - points=1, - contribution_date=yesterday, - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 + contribution_date__date=today, ) - - # Run the command twice + self.assertFalse(today_contributions.exists()) + + def test_lookback_window(self): + """Snapshot 3 days ago but not today still qualifies within 7-day window.""" + today = timezone.now().date() + three_days_ago = today - timedelta(days=3) + + # Remove today's snapshots for wallet2 + ValidatorWalletStatusSnapshot.objects.filter(wallet=self.wallet2_asimov).delete() + + # Add snapshot from 3 days ago + ValidatorWalletStatusSnapshot.objects.create( + wallet=self.wallet2_asimov, date=three_days_ago, status='active' + ) + call_command('add_daily_uptime', 
verbosity=0) - first_count = Contribution.objects.filter( - user=self.user1, - contribution_type=self.uptime_type - ).count() - + + contributions = Contribution.objects.filter( + user=self.user2, contribution_type=self.uptime_type + ) + self.assertEqual(contributions.count(), 1) + + def test_snapshot_gap_fallback(self): + """No snapshots for target date but wallet status is active → uses fallback.""" + today = timezone.now().date() + + # Remove ALL snapshots for wallet2 (simulating sync failure) + ValidatorWalletStatusSnapshot.objects.filter(wallet=self.wallet2_asimov).delete() + + # Wallet status is still 'active' in the model + self.assertEqual(self.wallet2_asimov.status, 'active') + + out = StringIO() + call_command('add_daily_uptime', verbose=True, stdout=out) + + contributions = Contribution.objects.filter( + user=self.user2, contribution_type=self.uptime_type + ) + self.assertEqual(contributions.count(), 1) + + # Check that fallback warning was logged + output = out.getvalue() + self.assertIn('Using current wallet status as fallback', output) + + def test_multi_network(self): + """Validator with wallets on both networks gets 2 contributions per day.""" call_command('add_daily_uptime', verbosity=0) - second_count = Contribution.objects.filter( - user=self.user1, - contribution_type=self.uptime_type - ).count() - - # Count should be the same - self.assertEqual(first_count, second_count) - - def test_dry_run_mode(self): - """Test that dry run mode doesn't create any contributions.""" - # Create initial uptime - yesterday = timezone.now() - timedelta(days=1) - Contribution.objects.create( - user=self.user1, - contribution_type=self.uptime_type, - points=1, - contribution_date=yesterday, - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 + + contributions = Contribution.objects.filter( + user=self.user1, contribution_type=self.uptime_type ) - + self.assertEqual(contributions.count(), 2) + + networks_in_notes = [c.notes for c in contributions] + 
self.assertTrue(any('(asimov)' in n for n in networks_in_notes)) + self.assertTrue(any('(bradbury)' in n for n in networks_in_notes)) + + def test_dry_run(self): + """Dry run mode doesn't create any contributions.""" initial_count = Contribution.objects.count() - - # Run in dry-run mode + out = StringIO() call_command('add_daily_uptime', dry_run=True, stdout=out) - - # No new contributions should be created + self.assertEqual(Contribution.objects.count(), initial_count) - - # Check output mentions dry run - output = out.getvalue() - self.assertIn('DRY RUN', output) - - def test_users_without_uptime_are_skipped(self): - """Test that users without any uptime contributions are skipped.""" - # Run the command (user_without_uptime has no contributions) + self.assertIn('DRY RUN', out.getvalue()) + + def test_force_mode(self): + """--force uses 1.0 multiplier when none exists.""" + # Delete all multipliers + GlobalLeaderboardMultiplier.objects.all().delete() + out = StringIO() - call_command('add_daily_uptime', stdout=out, verbosity=2) - - # Check that no contributions were created for user_without_uptime + call_command('add_daily_uptime', force=True, stdout=out) + + # Should create contributions with multiplier 1.0 contributions = Contribution.objects.filter( - user=self.user_without_uptime, - contribution_type=self.uptime_type - ).count() - - self.assertEqual(contributions, 0) - - # Check that no leaderboard entry was created - self.assertFalse( - LeaderboardEntry.objects.filter(user=self.user_without_uptime).exists() - ) - - def test_force_mode_with_missing_multiplier(self): - """Test that force mode uses default multiplier when none exists.""" - # Create initial uptime with existing multiplier first - yesterday = timezone.now() - timedelta(days=1) - Contribution.objects.create( - user=self.user1, contribution_type=self.uptime_type, - points=1, - contribution_date=yesterday, - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 + 
multiplier_at_creation=Decimal('1.0') ) - - # Now delete the multiplier for future dates + self.assertGreater(contributions.count(), 0) + self.assertIn('using default of 1.0', out.getvalue()) + + def test_no_multiplier_without_force(self): + """Without --force, missing multiplier raises CommandError.""" GlobalLeaderboardMultiplier.objects.all().delete() - - # Run with force flag + + with self.assertRaises(CommandError): + call_command('add_daily_uptime', verbosity=0) + + contributions = Contribution.objects.filter(contribution_type=self.uptime_type) + self.assertEqual(contributions.count(), 0) + + def test_leaderboard_updated(self): + """Contributions update leaderboard entries.""" + call_command('add_daily_uptime', verbosity=0) + + # Validator1 should have a leaderboard entry + entries = LeaderboardEntry.objects.filter(user=self.user1, type='validator') + self.assertTrue(entries.exists()) + self.assertGreater(entries.first().total_points, 0) + + def test_points_calculation(self): + """Frozen global points = points × multiplier.""" + call_command('add_daily_uptime', points=3, verbosity=0) + + contribution = Contribution.objects.filter( + user=self.user1, contribution_type=self.uptime_type + ).first() + + self.assertEqual(contribution.points, 3) + self.assertEqual(contribution.frozen_global_points, 6) # 3 × 2.0 + self.assertEqual(contribution.multiplier_at_creation, Decimal('2.0')) + + def test_network_filter(self): + """--network flag limits processing to one network.""" + call_command('add_daily_uptime', network='asimov', verbosity=0) + + # Only asimov contributions should exist + contributions = Contribution.objects.filter( + user=self.user1, contribution_type=self.uptime_type + ) + self.assertEqual(contributions.count(), 1) + self.assertIn('(asimov)', contributions.first().notes) + + def test_user_without_validator_skipped(self): + """Users without a Validator profile get no contributions.""" + call_command('add_daily_uptime', verbosity=0) + + contributions = 
Contribution.objects.filter( + user=self.user_no_validator, contribution_type=self.uptime_type + ) + self.assertEqual(contributions.count(), 0) + + def test_output_summary(self): + """Command outputs a summary with stats.""" out = StringIO() - call_command('add_daily_uptime', force=True, stdout=out, verbosity=2) - - # Check that contributions were created with default multiplier - new_contributions = Contribution.objects.filter( - user=self.user1, - contribution_type=self.uptime_type, - multiplier_at_creation=Decimal('1.0') - ).count() - - self.assertGreater(new_contributions, 0) - + call_command('add_daily_uptime', stdout=out) + output = out.getvalue() - self.assertIn('using default of 1.0', output) - - def test_custom_points_value(self): - """Test that custom points value is applied correctly.""" - # Create initial uptime - yesterday = timezone.now() - timedelta(days=1) - Contribution.objects.create( - user=self.user1, - contribution_type=self.uptime_type, - points=1, - contribution_date=yesterday, - multiplier_at_creation=Decimal('2.0'), - frozen_global_points=2 - ) - - # Run with custom points value - call_command('add_daily_uptime', points=5, verbosity=0) - - # Check that new contributions have 5 points - today = timezone.now().date() - todays_contribution = Contribution.objects.filter( - user=self.user1, - contribution_type=self.uptime_type, - contribution_date__date=today - ).first() - - self.assertIsNotNone(todays_contribution) - self.assertEqual(todays_contribution.points, 5) - self.assertEqual(todays_contribution.frozen_global_points, 10) # 5 * 2.0 multiplier \ No newline at end of file + self.assertIn('Daily uptime generation completed!', output) + self.assertIn('Validators with wallets:', output) + self.assertIn('New uptime contributions added:', output) From 8acfd10e1e1cd4dbdc78e5c4882a3a2ebf4383a5 Mon Sep 17 00:00:00 2001 From: Albert Martinez <58224660+albert-mr@users.noreply.github.com> Date: Fri, 27 Mar 2026 11:06:34 +0100 Subject: [PATCH 3/6] 
feat: add daily-uptime HTTP endpoint for cron automation Add POST /api/v1/validators/wallets/daily-uptime/ endpoint protected by IsCronToken authentication. Calls the add_daily_uptime management command and returns success/failure with output details. Co-Authored-By: Claude Opus 4.6 --- backend/validators/views.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/backend/validators/views.py b/backend/validators/views.py index b97096b6..2b9cad57 100644 --- a/backend/validators/views.py +++ b/backend/validators/views.py @@ -348,6 +348,34 @@ def sync(self, request): 'error': str(e) }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + @action(detail=False, methods=['post'], url_path='daily-uptime', permission_classes=[IsCronToken], authentication_classes=[]) + def daily_uptime(self, request): + """ + Trigger daily uptime point generation for all validators. + Protected by X-Cron-Token header authentication. + """ + from django.core.management import call_command + from io import StringIO + import logging + + logger = logging.getLogger('validators') + + try: + out = StringIO() + call_command('add_daily_uptime', '--verbose', stdout=out) + output = out.getvalue() + logger.info(f'Daily uptime completed: {output}') + return Response({ + 'success': True, + 'output': output + }) + except Exception as e: + logger.error(f'Daily uptime failed: {str(e)}') + return Response({ + 'success': False, + 'error': str(e) + }, status=status.HTTP_500_INTERNAL_SERVER_ERROR) + @action(detail=False, methods=['get']) def networks(self, request): """Return available network names and explorer URLs.""" From 923aea9c494e3951d6debaaaf966e821c883cd46 Mon Sep 17 00:00:00 2001 From: Albert Martinez <58224660+albert-mr@users.noreply.github.com> Date: Fri, 27 Mar 2026 11:06:45 +0100 Subject: [PATCH 4/6] feat: add GitHub Actions workflow for daily uptime cron Runs at 00:30 UTC daily, triggers the daily-uptime endpoint with CRON_SYNC_TOKEN. 
Supports manual workflow_dispatch for ad-hoc runs. Setup requirements: - Set CRON_SYNC_TOKEN secret in the 'cron-job' environment - Set API_BASE_URL secret pointing to the production API - The "Uptime" ContributionType must exist in the database - A GlobalLeaderboardMultiplier for "Uptime" must be active Co-Authored-By: Claude Opus 4.6 --- .github/workflows/daily-uptime.yml | 28 ++++++++++++++++++++++++++++ CHANGELOG.md | 16 ++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 .github/workflows/daily-uptime.yml diff --git a/.github/workflows/daily-uptime.yml b/.github/workflows/daily-uptime.yml new file mode 100644 index 00000000..74535df0 --- /dev/null +++ b/.github/workflows/daily-uptime.yml @@ -0,0 +1,28 @@ +name: Daily Uptime Points + +on: + schedule: + - cron: '30 0 * * *' # Daily at 00:30 UTC + workflow_dispatch: + +jobs: + uptime: + runs-on: ubuntu-latest + environment: cron-job + steps: + - name: Trigger Daily Uptime + run: | + response=$(curl -s -w "\n%{http_code}" -X POST \ + -H "X-Cron-Token: ${{ secrets.CRON_SYNC_TOKEN }}" \ + "${{ secrets.API_BASE_URL }}/api/v1/validators/wallets/daily-uptime/") + + http_code=$(echo "$response" | tail -n1) + body=$(echo "$response" | sed '$d') + + echo "Response: $body" + echo "HTTP Code: $http_code" + + if [ "$http_code" != "200" ]; then + echo "Daily uptime failed with status $http_code" + exit 1 + fi diff --git a/CHANGELOG.md b/CHANGELOG.md index 84ee7bc1..681a50ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,5 +4,21 @@ All notable user-facing changes to this project will be documented in this file. 
## Unreleased
 
+### Added
+- Daily uptime points system: automated management command and cron endpoint for granting daily validator uptime contributions
+- Multi-network support (asimov + bradbury) with per-network duplicate detection
+- Date range backfill support via `--start-date`/`--end-date` with 366-day cap
+- Snapshot-based activity verification with fallback to current wallet status for recent dates
+- GitHub Actions workflow for daily uptime cron job (00:30 UTC)
+- Comprehensive test suite (16 tests) covering all command paths
 - Direct Cloudinary image upload from Django admin for featured content (ce4c157)
 - Responsive hero banner images for tablet and mobile (e5c01b5)
+
+### Changed
+- Refactored `add_daily_uptime` command from per-user saves to bulk_create with batch processing
+- Leaderboard updates now use shared `update_user_leaderboard_entries` and `update_referrer_points` helpers
+- All contribution creation + leaderboard updates wrapped in atomic transaction
+
+### Fixed
+- Migration 0037 no longer crashes in test environments when seed users don't exist
+

From 606b3e2c4cc11f2feee8be281cfd0c7d8250e1b3 Mon Sep 17 00:00:00 2001
From: Albert Martinez <58224660+albert-mr@users.noreply.github.com>
Date: Fri, 27 Mar 2026 11:09:56 +0100
Co-Authored-By: Claude Opus 4.6 --- backend/CLAUDE.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/backend/CLAUDE.md b/backend/CLAUDE.md index 57b1ea5e..79b9e204 100644 --- a/backend/CLAUDE.md +++ b/backend/CLAUDE.md @@ -190,6 +190,20 @@ GET /api/v1/multiplier-periods/ # Steward Submissions (public metrics) GET /api/v1/steward-submissions/stats/ (public - aggregate stats) GET /api/v1/steward-submissions/daily-metrics/ (public - time-series data) + +# Validators +GET /api/v1/validators/me/ (requires auth) +PATCH /api/v1/validators/me/ (requires auth) +GET /api/v1/validators/newest/ +POST /api/v1/validators/link-by-operator/ (requires auth) +GET /api/v1/validators/my-wallets/ (requires auth) +GET /api/v1/validators/wallets/ +GET /api/v1/validators/wallets/{id}/ +GET /api/v1/validators/wallets/by-operator/{address}/ +GET /api/v1/validators/wallets/by-user-address/{address}/ +GET /api/v1/validators/wallets/networks/ +POST /api/v1/validators/wallets/sync/ (IsCronToken) +POST /api/v1/validators/wallets/daily-uptime/ (IsCronToken) ``` ## Environment Variables @@ -223,6 +237,15 @@ python manage.py test # Collect static files python manage.py collectstatic + +# Add daily uptime points for validators with active wallets +python manage.py add_daily_uptime # Today's date +python manage.py add_daily_uptime --date 2025-01-15 # Specific date +python manage.py add_daily_uptime --start-date 2025-01-01 --end-date 2025-01-31 # Date range +python manage.py add_daily_uptime --dry-run --verbose # Preview without changes +python manage.py add_daily_uptime --network asimov # Single network only +python manage.py add_daily_uptime --force # Use default multiplier if none exists +python manage.py add_daily_uptime --points 2 # Custom points per contribution ``` ## Authentication Flow From 97cf51078c1b211e9a401c95107960734134836e Mon Sep 17 00:00:00 2001 From: Albert Martinez <58224660+albert-mr@users.noreply.github.com> Date: Fri, 27 Mar 2026 11:23:34 +0100 
Subject: [PATCH 6/6] feat: add data migration for Uptime contribution type and multiplier Creates the "Uptime" ContributionType (slug: uptime, category: validator) and its initial GlobalLeaderboardMultiplier (2.0x) via RunPython migration. Required for the add_daily_uptime command to function. Run after deploy: python manage.py migrate contributions Co-Authored-By: Claude Opus 4.6 --- .../0041_create_uptime_contribution_type.py | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 backend/contributions/migrations/0041_create_uptime_contribution_type.py diff --git a/backend/contributions/migrations/0041_create_uptime_contribution_type.py b/backend/contributions/migrations/0041_create_uptime_contribution_type.py new file mode 100644 index 00000000..87e76d8f --- /dev/null +++ b/backend/contributions/migrations/0041_create_uptime_contribution_type.py @@ -0,0 +1,60 @@ +from django.db import migrations +from django.utils import timezone + + +def create_uptime_type(apps, schema_editor): + """ + Create the Uptime ContributionType and its multiplier if they don't exist. + Required by the add_daily_uptime management command. 
+ """ + ContributionType = apps.get_model('contributions', 'ContributionType') + Category = apps.get_model('contributions', 'Category') + GlobalLeaderboardMultiplier = apps.get_model('leaderboard', 'GlobalLeaderboardMultiplier') + + # Get the validator category (created by migration 0017) + validator_category = Category.objects.filter(slug='validator').first() + + contribution_type, created = ContributionType.objects.get_or_create( + slug='uptime', + defaults={ + 'name': 'Uptime', + 'description': 'Daily validator uptime points for active validators', + 'category': validator_category, + 'min_points': 1, + 'max_points': 10, + 'is_default': False, + 'is_submittable': False, + } + ) + + if created: + GlobalLeaderboardMultiplier.objects.create( + contribution_type=contribution_type, + multiplier_value=2.0, + valid_from=timezone.now(), + description='Initial multiplier for daily uptime points', + ) + + +def reverse_uptime_type(apps, schema_editor): + ContributionType = apps.get_model('contributions', 'ContributionType') + GlobalLeaderboardMultiplier = apps.get_model('leaderboard', 'GlobalLeaderboardMultiplier') + + try: + contribution_type = ContributionType.objects.get(slug='uptime') + GlobalLeaderboardMultiplier.objects.filter(contribution_type=contribution_type).delete() + contribution_type.delete() + except ContributionType.DoesNotExist: + pass + + +class Migration(migrations.Migration): + + dependencies = [ + ('contributions', '0040_convert_featured_images_to_cloudinary'), + ('leaderboard', '0014_add_referral_points_model'), + ] + + operations = [ + migrations.RunPython(create_uptime_type, reverse_uptime_type), + ]