Enhanced quarterly confirmation system with approval workflow and export improvements

Features added:
-  Fixed quarterly confirmation approval system with URL pattern
-  Added re-approval and status reset functionality for quarterly confirmations
-  Synchronized quarterly approval status with support payment system
-  Enhanced Destinataer export with missing fields (anrede, titel, mobil)
-  Added quarterly confirmation data and documents to export system
-  Fixed address field display issues in destinataer template
-  Added quarterly statistics dashboard to support payment lists
-  Implemented duplicate support payment prevention and cleanup
-  Added visual indicators for quarterly-linked support payments

Technical improvements:
- Enhanced create_quarterly_support_payment() with duplicate detection
- Added get_related_support_payment() method to VierteljahresNachweis model
- Improved quarterly confirmation workflow with proper status transitions
- Added computed address property to Destinataer model
- Fixed template field mismatches (anrede, titel, mobil vs strasse, plz, ort)
- Enhanced backup system with operation tracking and cancellation

Workflow enhancements:
- Quarterly confirmations now properly sync with support payments
- Single support payment per destinataer per quarter (no duplicates)
- Approval button works for both eingereicht and geprueft status
- Reset functionality allows workflow restart
- Export includes complete quarterly data with uploaded documents
This commit (acac8695fd, parent b00cf62d87, authored 2025-09-28 19:09:08 +02:00) changed 73 files, with 283,380 additions and 206 deletions.

View File

@@ -189,42 +189,65 @@ def run_restore(restore_job_id, backup_file_path):
restore_job.started_at = timezone.now()
restore_job.save()
# Verify backup file exists
if not os.path.exists(backup_file_path):
raise Exception(f"Backup file not found: {backup_file_path}")
# Extract backup
with tempfile.TemporaryDirectory() as temp_dir:
extract_dir = os.path.join(temp_dir, "restore")
os.makedirs(extract_dir)
# Extract tar.gz
with tarfile.open(backup_file_path, "r:gz") as tar:
tar.extractall(extract_dir)
try:
with tarfile.open(backup_file_path, "r:gz") as tar:
tar.extractall(extract_dir)
except Exception as e:
raise Exception(f"Failed to extract backup file: {e}")
# Validate backup
metadata_file = os.path.join(extract_dir, "backup_metadata.json")
if not os.path.exists(metadata_file):
raise Exception("Invalid backup: missing metadata")
metadata_files = [name for name in os.listdir(extract_dir) if name.endswith('backup_metadata.json')]
if not metadata_files:
raise Exception("Invalid backup: missing metadata file")
# Read metadata
import json
with open(metadata_file, "r") as f:
metadata = json.load(f)
try:
metadata_file = os.path.join(extract_dir, metadata_files[0])
with open(metadata_file, "r") as f:
metadata = json.load(f)
print(f"Restoring backup created at: {metadata.get('created_at', 'unknown')}")
except Exception as e:
print(f"Warning: Could not read backup metadata: {e}")
# Restore database
db_backup_file = os.path.join(extract_dir, "database.sql")
if os.path.exists(db_backup_file):
print("Restoring database...")
restore_database(db_backup_file)
print("Database restore completed")
else:
print("No database backup found in archive")
# Restore files
files_dir = os.path.join(extract_dir, "files")
if os.path.exists(files_dir):
print("Restoring files...")
restore_files(files_dir)
print("Files restore completed")
else:
print("No files backup found in archive")
# Update job status
restore_job.status = "completed"
restore_job.completed_at = timezone.now()
restore_job.save()
print(f"Restore job {restore_job_id} completed successfully")
except Exception as e:
print(f"Restore job {restore_job_id} failed: {e}")
restore_job = BackupJob.objects.get(id=restore_job_id)
restore_job.status = "failed"
restore_job.error_message = str(e)
restore_job.completed_at = timezone.now()
@@ -234,49 +257,151 @@ def run_restore(restore_job_id, backup_file_path):
def restore_database(db_backup_file):
    """Restore the PostgreSQL database from a backup file.

    Detects whether *db_backup_file* is a pg_dump custom-format archive
    (restored with ``pg_restore``) or a plain SQL dump (restored with
    ``psql``), runs the matching client tool against the connection
    configured in ``settings.DATABASES["default"]``, and tolerates a known
    set of non-fatal warnings (``already exists``, ``does not exist``,
    ``unrecognized configuration parameter``).

    Args:
        db_backup_file: Path to the database dump extracted from the backup
            archive.

    Raises:
        Exception: If the restore command reports serious errors, or any
            other step fails. The original cause is included in the message.

    Side effects: prints progress to stdout and, on success, prints a
    best-effort row-count verification of a few key tables.
    """
    try:
        print(f"Starting database restore from: {db_backup_file}")
        db_settings = settings.DATABASES["default"]
        print(f"Database settings: {db_settings.get('NAME')} at {db_settings.get('HOST')}:{db_settings.get('PORT')}")

        is_custom_format = _is_custom_format_backup(db_backup_file)
        cmd = _build_restore_command(db_settings, db_backup_file, is_custom_format)
        print(f"Running command: {' '.join(cmd)}")

        # Authenticate via PGPASSWORD so neither tool prompts interactively.
        env = os.environ.copy()
        env["PGPASSWORD"] = db_settings.get("PASSWORD", "")

        result = subprocess.run(cmd, env=env, capture_output=True, text=True)
        print(f"Command exit code: {result.returncode}")
        print(f"STDOUT length: {len(result.stdout)} chars")
        print(f"STDERR length: {len(result.stderr)} chars")
        # Truncated output for debugging without flooding the log.
        if result.stdout:
            print(f"STDOUT (first 500 chars): {result.stdout[:500]}...")
        if result.stderr:
            print(f"STDERR (first 500 chars): {result.stderr[:500]}...")

        if result.returncode != 0:
            _check_restore_stderr(result.stderr or "")
        else:
            print("Database restore completed successfully with no errors")

        _verify_restored_tables()
    except Exception as e:
        print(f"Database restore failed with exception: {e}")
        raise Exception(f"Database restore failed: {e}")


def _is_custom_format_backup(db_backup_file):
    """Return True if the file is a pg_dump custom-format archive.

    Custom-format dumps start with the magic bytes ``PGDMP``; anything else
    (or an unreadable file) is treated as a plain SQL dump.
    """
    try:
        with open(db_backup_file, "rb") as f:
            header = f.read(8)
        if header.startswith(b"PGDMP"):
            print(f"Detected custom format backup (header: {header})")
            return True
        print(f"Detected SQL format backup (header: {header})")
        return False
    except Exception as e:
        print(f"Could not determine backup format, assuming SQL: {e}")
        return False


def _build_restore_command(db_settings, db_backup_file, is_custom_format):
    """Build the ``pg_restore`` or ``psql`` argv for the given dump file."""
    if is_custom_format:
        print("Using pg_restore for custom format")
        return [
            "pg_restore",
            "--host",
            db_settings.get("HOST", "localhost"),
            "--port",
            str(db_settings.get("PORT", 5432)),
            "--username",
            db_settings.get("USER", "postgres"),
            "--dbname",
            db_settings.get("NAME", "stiftung"),
            "--clean",  # Drop existing objects first
            "--if-exists",  # Don't error if objects don't exist
            "--no-owner",  # don't attempt to set original owners
            "--role",
            db_settings.get("USER", "postgres"),  # set target owner
            # No --single-transaction: allow partial restore to survive
            # configuration-parameter errors from newer/older servers.
            "--disable-triggers",  # avoid FK issues during data load
            "--no-password",
            "--verbose",
            # No --exit-on-error: continue past configuration warnings.
            db_backup_file,
        ]
    print("Using psql for SQL format")
    return [
        "psql",
        "--host",
        db_settings.get("HOST", "localhost"),
        "--port",
        str(db_settings.get("PORT", 5432)),
        "--username",
        db_settings.get("USER", "postgres"),
        "--dbname",
        db_settings.get("NAME", "stiftung"),
        "--no-password",
        "--file",
        db_backup_file,
    ]


def _check_restore_stderr(stderr):
    """Raise on serious restore errors; tolerate known-ignorable warnings.

    ``pg_restore --clean`` routinely emits "does not exist" / "already
    exists" noise, and dumps from newer servers emit "unrecognized
    configuration parameter" — none of which invalidate the restored data.

    Raises:
        Exception: If any ERROR line is not in the ignorable set.
    """
    if "unrecognized configuration parameter" in stderr:
        print(f"Warning: Configuration parameter issues detected, but continuing: {stderr[:200]}...")
        serious_errors = [
            line for line in stderr.split("\n")
            if "ERROR" in line and "unrecognized configuration parameter" not in line
        ]
        if serious_errors:
            print(f"Serious errors found: {serious_errors}")
            raise Exception(f"pg_restore failed with serious errors: {'; '.join(serious_errors)}")
        print("Restore completed with configuration warnings (non-fatal)")
    elif "ERROR" in stderr.upper():
        ignorable_patterns = [
            "already exists",
            "does not exist",
            "unrecognized configuration parameter",
        ]
        serious_errors = [
            line for line in stderr.split("\n")
            if "ERROR" in line and not any(pattern in line for pattern in ignorable_patterns)
        ]
        if serious_errors:
            print(f"Serious errors found: {serious_errors}")
            raise Exception(f"Database restore failed with errors: {'; '.join(serious_errors)}")
        print("Restore completed with ignorable warnings")
    else:
        print("Restore completed with warnings but no errors")


def _verify_restored_tables():
    """Best-effort row-count check on a few key tables; never raises."""
    try:
        print("Verifying data was restored...")
        from django.db import connection
        with connection.cursor() as cursor:
            # Table names are hardcoded constants, so f-string SQL is safe here.
            for table in ("stiftung_person", "stiftung_land", "stiftung_destinataer"):
                try:
                    cursor.execute(f"SELECT COUNT(*) FROM {table}")
                    count = cursor.fetchone()[0]
                    print(f"Table {table}: {count} rows")
                except Exception as e:
                    print(f"Could not check table {table}: {e}")
    except Exception as e:
        print(f"Could not verify data restoration: {e}")
@@ -335,3 +460,51 @@ def cleanup_old_backups(keep_count=10):
except Exception as e:
print(f"Cleanup failed: {e}")
def validate_backup_file(backup_file_path):
    """Validate that a backup archive exists, is well-formed, and restorable.

    Checks that the file exists, has a ``.tar.gz`` extension, can be opened
    as a gzipped tar, and contains a ``backup_metadata.json`` (with or
    without a leading path prefix) whose JSON has a ``backup_type`` field.

    Args:
        backup_file_path: Path to the candidate backup archive.

    Returns:
        A ``(is_valid, message)`` tuple. ``message`` describes the backup on
        success or the reason for rejection on failure. Never raises.
    """
    # Import json up front: the original imported it mid-try, so the
    # `except json.JSONDecodeError` clause could itself raise NameError if
    # an earlier exception fired before the import line executed.
    import json

    try:
        if not os.path.exists(backup_file_path):
            return False, "Backup file does not exist"
        if not backup_file_path.endswith('.tar.gz'):
            return False, "Invalid file format. Only .tar.gz files are supported"
        # Try to open the archive and extract just the metadata file.
        with tempfile.TemporaryDirectory() as temp_dir:
            try:
                with tarfile.open(backup_file_path, "r:gz") as tar:
                    names = tar.getnames()
                    # Metadata may be stored with or without a ./ prefix.
                    metadata_files = [name for name in names if name.endswith('backup_metadata.json')]
                    if not metadata_files:
                        return False, "Invalid backup: missing metadata"
                    metadata_file = metadata_files[0]
                    tar.extract(metadata_file, temp_dir)
                    extracted_metadata = os.path.join(temp_dir, metadata_file)
                    with open(extracted_metadata, "r") as f:
                        metadata = json.load(f)
                    # Minimal structural check on the metadata contents.
                    if "backup_type" not in metadata:
                        return False, "Invalid backup metadata"
                    created_at = metadata.get('created_at', 'unknown date')
                    backup_type = metadata.get('backup_type', 'unknown type')
                    return True, f"Valid {backup_type} backup from {created_at}"
            except tarfile.TarError as e:
                return False, f"Corrupted backup file: {e}"
            except json.JSONDecodeError:
                return False, "Invalid backup metadata format"
    except Exception as e:
        return False, f"Validation failed: {e}"