Initial commit

Janus C. H. Knudsen 2026-02-03 00:17:08 +01:00
commit 77d35ff965
51 changed files with 5591 additions and 0 deletions

Services/AzureStorageService.cs
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Models;
using PlanTempusAdmin.Models;
using BlobType = PlanTempusAdmin.Models.BlobType;
namespace PlanTempusAdmin.Services;
public class AzureStorageService
{
private readonly BlobServiceClient? _serviceClient;
private readonly ILogger<AzureStorageService> _logger;
private readonly string _accountName;
public AzureStorageService(IConfiguration configuration, ILogger<AzureStorageService> logger)
{
_logger = logger;
var connectionString = configuration.GetConnectionString("AzureStorage");
if (!string.IsNullOrEmpty(connectionString))
{
try
{
_serviceClient = new BlobServiceClient(connectionString);
// Extract account name from connection string
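// (typical format: DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net)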
var parts = connectionString.Split(';')
.Select(p => p.Split('=', 2))
.Where(p => p.Length == 2)
.ToDictionary(p => p[0], p => p[1], StringComparer.OrdinalIgnoreCase);
_accountName = parts.TryGetValue("AccountName", out var name) ? name : "unknown";
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to initialize Azure Storage client");
}
}
_accountName ??= string.Empty;
}
public async Task<bool> TestConnectionAsync()
{
if (_serviceClient == null) return false;
try
{
await _serviceClient.GetPropertiesAsync();
return true;
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Could not connect to Azure Storage");
return false;
}
}
public async Task<List<AzureContainer>> GetContainersAsync()
{
if (_serviceClient == null) return new List<AzureContainer>();
var containers = new List<AzureContainer>();
try
{
await foreach (var container in _serviceClient.GetBlobContainersAsync())
{
var containerClient = _serviceClient.GetBlobContainerClient(container.Name);
var stats = await GetContainerStatsAsync(containerClient);
containers.Add(new AzureContainer
{
Name = container.Name,
LastModified = container.Properties.LastModified,
TotalSize = stats.size,
BlobCount = stats.count
});
}
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching containers");
}
return containers;
}
public async Task<ContainerDetails> GetContainerDetailsAsync(string containerName, string? prefix = null, int limit = 100)
{
var details = new ContainerDetails { Name = containerName };
if (_serviceClient == null) return details;
try
{
var containerClient = _serviceClient.GetBlobContainerClient(containerName);
var blobs = new List<AzureBlob>();
var prefixes = new HashSet<string>();
await foreach (var item in containerClient.GetBlobsByHierarchyAsync(prefix: prefix, delimiter: "/"))
{
if (item.IsPrefix)
{
prefixes.Add(item.Prefix.TrimEnd('/'));
}
else if (item.Blob != null)
{
blobs.Add(MapToAzureBlob(item.Blob));
details.TotalSize += item.Blob.Properties.ContentLength ?? 0;
details.BlobCount++;
}
if (blobs.Count >= limit) break;
}
details.Blobs = blobs.OrderByDescending(b => b.LastModified).ToList();
details.Prefixes = prefixes.OrderBy(p => p).ToList();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching container details for {Container}", containerName);
}
return details;
}
public async Task<List<AzureBlob>> GetBlobsAsync(string containerName, string? prefix = null, int limit = 100)
{
if (_serviceClient == null) return new List<AzureBlob>();
var blobs = new List<AzureBlob>();
try
{
var containerClient = _serviceClient.GetBlobContainerClient(containerName);
await foreach (var blob in containerClient.GetBlobsAsync(prefix: prefix))
{
blobs.Add(MapToAzureBlob(blob));
if (blobs.Count >= limit) break;
}
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching blobs from {Container}", containerName);
}
return blobs.OrderByDescending(b => b.LastModified).ToList();
}
public async Task<AzureStorageDashboard> GetDashboardAsync()
{
var dashboard = new AzureStorageDashboard
{
AccountName = _accountName
};
if (_serviceClient == null) return dashboard;
try
{
dashboard.IsConnected = await TestConnectionAsync();
if (!dashboard.IsConnected) return dashboard;
var containers = await GetContainersAsync();
dashboard.Containers = containers;
dashboard.TotalContainers = containers.Count;
dashboard.TotalSize = containers.Sum(c => c.TotalSize);
dashboard.TotalBlobs = containers.Sum(c => c.BlobCount);
// Find backup container(s) and get recent backups
var backupContainers = containers.Where(c => c.Name.Contains("backup", StringComparison.OrdinalIgnoreCase)).ToList();
var recentBlobs = new List<AzureBlob>();
foreach (var container in backupContainers.Take(3))
{
var blobs = await GetBlobsAsync(container.Name, limit: 20);
recentBlobs.AddRange(blobs);
var backupBlobs = blobs.Where(b => b.IsBackup).ToList();
dashboard.BackupFileCount += backupBlobs.Count;
dashboard.BackupTotalSize += backupBlobs.Sum(b => b.Size);
}
dashboard.RecentBlobs = recentBlobs
.OrderByDescending(b => b.LastModified)
.Take(10)
.ToList();
if (dashboard.RecentBlobs.Any(b => b.IsBackup))
{
dashboard.LastBackupUpload = dashboard.RecentBlobs
.Where(b => b.IsBackup)
.Max(b => b.LastModified);
}
}
catch (Exception ex)
{
_logger.LogError(ex, "Error building Azure Storage dashboard");
}
return dashboard;
}
public async Task<string?> GetBlobDownloadUrlAsync(string containerName, string blobName, TimeSpan? expiry = null)
{
if (_serviceClient == null) return null;
try
{
var containerClient = _serviceClient.GetBlobContainerClient(containerName);
var blobClient = containerClient.GetBlobClient(blobName);
if (!await blobClient.ExistsAsync()) return null;
// Generate SAS token for download
var sasBuilder = new Azure.Storage.Sas.BlobSasBuilder
{
BlobContainerName = containerName,
BlobName = blobName,
Resource = "b",
ExpiresOn = DateTimeOffset.UtcNow.Add(expiry ?? TimeSpan.FromHours(1))
};
sasBuilder.SetPermissions(Azure.Storage.Sas.BlobSasPermissions.Read);
// Check if we can generate SAS (requires account key)
if (blobClient.CanGenerateSasUri)
{
return blobClient.GenerateSasUri(sasBuilder).ToString();
}
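// Fall back to the plain blob URI; without a SAS this only works if the container allows anonymous read access.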
return blobClient.Uri.ToString();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error generating download URL for {Container}/{Blob}", containerName, blobName);
return null;
}
}
public async Task<bool> DeleteBlobAsync(string containerName, string blobName)
{
if (_serviceClient == null) return false;
try
{
var containerClient = _serviceClient.GetBlobContainerClient(containerName);
var blobClient = containerClient.GetBlobClient(blobName);
var response = await blobClient.DeleteIfExistsAsync();
return response.Value;
}
catch (Exception ex)
{
_logger.LogError(ex, "Error deleting blob {Container}/{Blob}", containerName, blobName);
return false;
}
}
private async Task<(long size, int count)> GetContainerStatsAsync(BlobContainerClient containerClient)
{
long totalSize = 0;
int count = 0;
try
{
await foreach (var blob in containerClient.GetBlobsAsync())
{
totalSize += blob.Properties.ContentLength ?? 0;
count++;
// Limit iteration for large containers
if (count >= 10000) break;
}
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error getting stats for container {Container}", containerClient.Name);
}
return (totalSize, count);
}
private static AzureBlob MapToAzureBlob(BlobItem blob)
{
return new AzureBlob
{
Name = blob.Name,
ContentType = blob.Properties.ContentType,
Size = blob.Properties.ContentLength ?? 0,
LastModified = blob.Properties.LastModified,
CreatedOn = blob.Properties.CreatedOn,
AccessTier = blob.Properties.AccessTier?.ToString(),
BlobType = blob.Properties.BlobType switch
{
Azure.Storage.Blobs.Models.BlobType.Page => BlobType.Page,
Azure.Storage.Blobs.Models.BlobType.Append => BlobType.Append,
_ => BlobType.Block
}
};
}
}
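
For quick local verification, a minimal sketch of exercising AzureStorageService outside the web host could look like the following; the in-memory configuration mirrors the ConnectionStrings:AzureStorage key the constructor reads, and the account name and key below are placeholders, not real values.

using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using PlanTempusAdmin.Services;

// Hypothetical harness (top-level statements): build an IConfiguration with the key the service expects.
var config = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>
    {
        ["ConnectionStrings:AzureStorage"] =
            "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<base64-key>;EndpointSuffix=core.windows.net"
    })
    .Build();

using var loggerFactory = LoggerFactory.Create(b => b.AddConsole());
var storage = new AzureStorageService(config, loggerFactory.CreateLogger<AzureStorageService>());

// Returns false when the connection string is missing or the account is unreachable.
Console.WriteLine(await storage.TestConnectionAsync());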

Services/BackupService.cs
using Dapper;
using Npgsql;
using PlanTempusAdmin.Models;
namespace PlanTempusAdmin.Services;
public class BackupService
{
private readonly string _connectionString;
private readonly ILogger<BackupService> _logger;
static BackupService()
{
DefaultTypeMap.MatchNamesWithUnderscores = true;
}
public BackupService(IConfiguration configuration, ILogger<BackupService> logger)
{
_connectionString = configuration.GetConnectionString("BackupDb")
?? throw new InvalidOperationException("BackupDb connection string not configured");
_logger = logger;
}
public async Task<List<BackupLog>> GetLogsAsync(int limit = 100)
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
var logs = await connection.QueryAsync<BackupLog>(@"
SELECT id, started_at, completed_at, duration_ms, backup_type, source_name, source_path,
destination, remote_path, status, size_bytes, file_count, error_message, error_code,
retry_count, hostname, script_version, checksum, created_at
FROM backup_logs
ORDER BY started_at DESC
LIMIT @limit", new { limit });
return logs.ToList();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching backup logs");
return new List<BackupLog>();
}
}
public async Task<BackupSummary> GetSummaryAsync()
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
var summary = await connection.QuerySingleOrDefaultAsync<BackupSummary>(@"
SELECT
COUNT(*)::int as total_backups,
COUNT(*) FILTER (WHERE status = 'success')::int as successful_backups,
COUNT(*) FILTER (WHERE status = 'failed')::int as failed_backups,
MAX(started_at) as last_backup,
MAX(started_at) FILTER (WHERE status = 'success') as last_successful_backup,
COALESCE(SUM(size_bytes) FILTER (WHERE status = 'success'), 0) as total_size_bytes
FROM backup_logs");
return summary ?? new BackupSummary();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching backup summary");
return new BackupSummary();
}
}
public async Task<List<RepositorySummary>> GetRepositorySummariesAsync()
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
var summaries = await connection.QueryAsync<RepositorySummary>(@"
WITH ranked_backups AS (
SELECT
source_name,
backup_type,
size_bytes,
started_at,
status,
ROW_NUMBER() OVER (PARTITION BY source_name ORDER BY started_at DESC) as rn
FROM backup_logs
WHERE status = 'success'
)
SELECT
bl.source_name,
bl.backup_type,
COUNT(*)::int as total_backups,
COUNT(*) FILTER (WHERE bl.status = 'success')::int as successful_backups,
COUNT(*) FILTER (WHERE bl.status = 'failed')::int as failed_backups,
MAX(bl.started_at) as last_backup,
MAX(bl.started_at) FILTER (WHERE bl.status = 'success') as last_successful_backup,
COALESCE(SUM(bl.size_bytes) FILTER (WHERE bl.status = 'success'), 0) as total_size_bytes,
(SELECT rb.size_bytes FROM ranked_backups rb WHERE rb.source_name = bl.source_name AND rb.rn = 1) as last_backup_size_bytes,
(SELECT rb.size_bytes FROM ranked_backups rb WHERE rb.source_name = bl.source_name AND rb.rn = 2) as previous_backup_size_bytes
FROM backup_logs bl
GROUP BY bl.source_name, bl.backup_type
ORDER BY last_backup DESC NULLS LAST");
return summaries.ToList();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching repository summaries");
return new List<RepositorySummary>();
}
}
public async Task<BackupDashboard> GetDashboardAsync()
{
var dashboard = new BackupDashboard();
try
{
await using var connection = new NpgsqlConnection(_connectionString);
// Overall stats
var stats = await connection.QuerySingleOrDefaultAsync<dynamic>(@"
SELECT
COUNT(*)::int as total_backups,
COUNT(*) FILTER (WHERE status = 'success')::int as successful_backups,
COUNT(*) FILTER (WHERE status = 'failed')::int as failed_backups,
COUNT(*) FILTER (WHERE status = 'running')::int as running_backups,
COALESCE(SUM(size_bytes) FILTER (WHERE status = 'success'), 0) as total_size_bytes,
MAX(started_at) as last_backup,
MAX(started_at) FILTER (WHERE status = 'success') as last_successful_backup,
COUNT(*) FILTER (WHERE started_at > NOW() - INTERVAL '24 hours')::int as backups_last_24_hours,
COUNT(*) FILTER (WHERE started_at > NOW() - INTERVAL '7 days')::int as backups_last_7_days,
COALESCE(SUM(size_bytes) FILTER (WHERE status = 'success' AND started_at > NOW() - INTERVAL '24 hours'), 0) as size_last_24_hours,
COALESCE(SUM(size_bytes) FILTER (WHERE status = 'success' AND started_at > NOW() - INTERVAL '7 days'), 0) as size_last_7_days
FROM backup_logs");
if (stats != null)
{
dashboard.TotalBackups = (int)stats.total_backups;
dashboard.SuccessfulBackups = (int)stats.successful_backups;
dashboard.FailedBackups = (int)stats.failed_backups;
dashboard.RunningBackups = (int)stats.running_backups;
dashboard.TotalSizeBytes = (long)stats.total_size_bytes;
dashboard.LastBackup = stats.last_backup;
dashboard.LastSuccessfulBackup = stats.last_successful_backup;
dashboard.BackupsLast24Hours = (int)stats.backups_last_24_hours;
dashboard.BackupsLast7Days = (int)stats.backups_last_7_days;
dashboard.SizeLast24Hours = (long)stats.size_last_24_hours;
dashboard.SizeLast7Days = (long)stats.size_last_7_days;
}
// By backup type
dashboard.ByType = (await connection.QueryAsync<BackupTypeStat>(@"
SELECT
backup_type,
COUNT(*)::int as total,
COUNT(*) FILTER (WHERE status = 'success')::int as successful,
COUNT(*) FILTER (WHERE status = 'failed')::int as failed,
COUNT(*) FILTER (WHERE status = 'running')::int as running,
COALESCE(SUM(size_bytes) FILTER (WHERE status = 'success'), 0) as total_size,
MAX(started_at) as last_backup
FROM backup_logs
GROUP BY backup_type
ORDER BY total DESC")).ToList();
// By destination
dashboard.ByDestination = (await connection.QueryAsync<DestinationStat>(@"
SELECT
destination,
COUNT(*)::int as total,
COUNT(*) FILTER (WHERE status = 'success')::int as successful,
COUNT(*) FILTER (WHERE status = 'failed')::int as failed,
COALESCE(SUM(size_bytes) FILTER (WHERE status = 'success'), 0) as total_size,
MAX(started_at) as last_backup
FROM backup_logs
GROUP BY destination
ORDER BY total DESC")).ToList();
// By host
dashboard.ByHost = (await connection.QueryAsync<HostStat>(@"
SELECT
COALESCE(hostname, 'unknown') as hostname,
COUNT(*)::int as total,
COUNT(*) FILTER (WHERE status = 'success')::int as successful,
COUNT(*) FILTER (WHERE status = 'failed')::int as failed,
COUNT(*) FILTER (WHERE status = 'running')::int as running,
MAX(started_at) as last_backup,
(SELECT script_version FROM backup_logs b2
WHERE COALESCE(b2.hostname, 'unknown') = COALESCE(backup_logs.hostname, 'unknown')
ORDER BY started_at DESC LIMIT 1) as script_version
FROM backup_logs
GROUP BY COALESCE(hostname, 'unknown')
ORDER BY total DESC")).ToList();
// Top errors
dashboard.TopErrors = (await connection.QueryAsync<ErrorStat>(@"
SELECT
COALESCE(error_code, 'UNKNOWN') as error_code,
COUNT(*)::int as count,
MAX(started_at) as last_occurrence,
(SELECT error_message FROM backup_logs b2
WHERE COALESCE(b2.error_code, 'UNKNOWN') = COALESCE(backup_logs.error_code, 'UNKNOWN')
AND b2.status = 'failed'
ORDER BY started_at DESC LIMIT 1) as last_message
FROM backup_logs
WHERE status = 'failed'
GROUP BY COALESCE(error_code, 'UNKNOWN')
ORDER BY count DESC
LIMIT 5")).ToList();
// Daily stats (last 14 days)
dashboard.DailyStats = (await connection.QueryAsync<DailyStat>(@"
SELECT
DATE(started_at) as date,
COUNT(*)::int as total,
COUNT(*) FILTER (WHERE status = 'success')::int as successful,
COUNT(*) FILTER (WHERE status = 'failed')::int as failed,
COALESCE(SUM(size_bytes) FILTER (WHERE status = 'success'), 0) as total_size
FROM backup_logs
WHERE started_at > NOW() - INTERVAL '14 days'
GROUP BY DATE(started_at)
ORDER BY date DESC")).ToList();
// Running now
dashboard.RunningNow = (await connection.QueryAsync<BackupLog>(@"
SELECT * FROM backup_logs
WHERE status = 'running'
ORDER BY started_at DESC")).ToList();
// Recent successes
dashboard.RecentSuccesses = (await connection.QueryAsync<BackupLog>(@"
SELECT * FROM backup_logs
WHERE status = 'success'
ORDER BY started_at DESC
LIMIT 5")).ToList();
// Recent failures
dashboard.RecentFailures = (await connection.QueryAsync<BackupLog>(@"
SELECT * FROM backup_logs
WHERE status = 'failed'
ORDER BY started_at DESC
LIMIT 5")).ToList();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching backup dashboard");
}
return dashboard;
}
public async Task<bool> TestConnectionAsync()
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
await connection.OpenAsync();
return true;
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Could not connect to backup database");
return false;
}
}
}
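
These queries assume a backup_logs table whose snake_case columns Dapper maps onto BackupLog properties through the MatchNamesWithUnderscores switch set in the static constructor. A rough sketch of what that model likely looks like, with property types inferred from how the columns are used rather than taken from the project:

// Inferred sketch of PlanTempusAdmin.Models.BackupLog; property types are assumptions.
public class BackupLog
{
    public long Id { get; set; }                  // id
    public DateTime StartedAt { get; set; }       // started_at
    public DateTime? CompletedAt { get; set; }    // completed_at
    public long? DurationMs { get; set; }         // duration_ms
    public string BackupType { get; set; } = "";  // backup_type
    public string SourceName { get; set; } = "";  // source_name
    public string? SourcePath { get; set; }       // source_path
    public string Destination { get; set; } = ""; // destination
    public string? RemotePath { get; set; }       // remote_path
    public string Status { get; set; } = "";      // status: running | success | failed
    public long? SizeBytes { get; set; }          // size_bytes
    public int? FileCount { get; set; }           // file_count
    public string? ErrorMessage { get; set; }     // error_message
    public string? ErrorCode { get; set; }        // error_code
    public int RetryCount { get; set; }           // retry_count
    public string? Hostname { get; set; }         // hostname
    public string? ScriptVersion { get; set; }    // script_version
    public string? Checksum { get; set; }         // checksum
    public DateTime CreatedAt { get; set; }       // created_at
}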

Services/CaddyService.cs
using System.Text.Json;
using PlanTempusAdmin.Models;
namespace PlanTempusAdmin.Services;
public class CaddyService
{
private readonly HttpClient _httpClient;
private readonly ILogger<CaddyService> _logger;
private readonly string _caddyAdminUrl;
public CaddyService(HttpClient httpClient, ILogger<CaddyService> logger, IConfiguration configuration)
{
_httpClient = httpClient;
_logger = logger;
_caddyAdminUrl = configuration.GetValue<string>("Caddy:AdminUrl") ?? "http://localhost:2019";
}
public async Task<bool> IsRunningAsync()
{
try
{
var response = await _httpClient.GetAsync($"{_caddyAdminUrl}/config/");
return response.IsSuccessStatusCode;
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Could not connect to Caddy Admin API");
return false;
}
}
public async Task<CaddyConfig?> GetConfigAsync()
{
try
{
var response = await _httpClient.GetAsync($"{_caddyAdminUrl}/config/");
if (!response.IsSuccessStatusCode)
{
return null;
}
var json = await response.Content.ReadAsStringAsync();
var options = new JsonSerializerOptions
{
PropertyNameCaseInsensitive = true
};
return JsonSerializer.Deserialize<CaddyConfig>(json, options);
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching Caddy config");
return null;
}
}
public async Task<List<CaddyHost>> GetHostsAsync()
{
var hosts = new List<CaddyHost>();
try
{
var config = await GetConfigAsync();
if (config?.Apps?.Http?.Servers == null)
{
return hosts;
}
foreach (var server in config.Apps.Http.Servers.Values)
{
if (server.Routes == null) continue;
foreach (var route in server.Routes)
{
if (route.Match == null) continue;
foreach (var match in route.Match)
{
if (match.Host == null) continue;
foreach (var hostname in match.Host)
{
var host = new CaddyHost
{
Hostname = hostname,
Addresses = server.Listen ?? Array.Empty<string>(),
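// Heuristic: assume TLS when any listener address includes :443.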
Tls = server.Listen?.Any(l => l.Contains(":443")) ?? false
};
// Try to get upstream from handlers
if (route.Handle != null)
{
var reverseProxy = route.Handle.FirstOrDefault(h => h.Handler == "reverse_proxy");
if (reverseProxy?.Upstreams?.Length > 0)
{
host.Upstream = reverseProxy.Upstreams[0].Dial;
}
}
hosts.Add(host);
}
}
}
}
}
catch (Exception ex)
{
_logger.LogError(ex, "Error parsing Caddy hosts");
}
return hosts;
}
public async Task<string?> GetRawConfigAsync()
{
try
{
var response = await _httpClient.GetAsync($"{_caddyAdminUrl}/config/");
if (!response.IsSuccessStatusCode)
{
return null;
}
return await response.Content.ReadAsStringAsync();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching raw Caddy config");
return null;
}
}
}
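
GetConfigAsync deserializes the JSON returned by Caddy's admin endpoint (/config/) into CaddyConfig from PlanTempusAdmin.Models. Inferred from the property accesses above and the shape of Caddy's JSON config (apps → http → servers → routes), the model presumably looks roughly like this; the class names other than CaddyConfig are made up here.

// Inferred sketch only; the real DTOs live in PlanTempusAdmin.Models.
public class CaddyConfig  { public CaddyApps? Apps { get; set; } }
public class CaddyApps    { public CaddyHttpApp? Http { get; set; } }
public class CaddyHttpApp { public Dictionary<string, CaddyServer>? Servers { get; set; } }

public class CaddyServer
{
    public string[]? Listen { get; set; }           // e.g. [":443"]
    public List<CaddyRoute>? Routes { get; set; }
}

public class CaddyRoute
{
    public List<CaddyMatcher>? Match { get; set; }
    public List<CaddyHandler>? Handle { get; set; }
}

public class CaddyMatcher { public string[]? Host { get; set; } }

public class CaddyHandler
{
    public string? Handler { get; set; }            // "reverse_proxy", "file_server", ...
    public CaddyUpstream[]? Upstreams { get; set; }
}

public class CaddyUpstream { public string? Dial { get; set; } }   // e.g. "localhost:5000"

With PropertyNameCaseInsensitive = true, the lower-case JSON keys ("apps", "servers", "listen", ...) bind to these Pascal-case properties without extra attributes.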

Services/ForgejoService.cs
using Dapper;
using Npgsql;
using PlanTempusAdmin.Models;
namespace PlanTempusAdmin.Services;
public class ForgejoService
{
private readonly string _connectionString;
private readonly ILogger<ForgejoService> _logger;
static ForgejoService()
{
DefaultTypeMap.MatchNamesWithUnderscores = true;
}
public ForgejoService(IConfiguration configuration, ILogger<ForgejoService> logger)
{
_connectionString = configuration.GetConnectionString("ForgejoDb")
?? throw new InvalidOperationException("ForgejoDb connection string not configured");
_logger = logger;
}
public async Task<bool> TestConnectionAsync()
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
await connection.OpenAsync();
return true;
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Could not connect to Forgejo database");
return false;
}
}
public async Task<ForgejoDashboard> GetDashboardAsync()
{
var dashboard = new ForgejoDashboard();
try
{
await using var connection = new NpgsqlConnection(_connectionString);
// Repository stats
var repoStats = await connection.QuerySingleOrDefaultAsync<dynamic>(@"
SELECT
COUNT(*)::int as total,
COUNT(*) FILTER (WHERE NOT is_private)::int as public_repos,
COUNT(*) FILTER (WHERE is_private)::int as private_repos,
COUNT(*) FILTER (WHERE is_fork)::int as forked,
COUNT(*) FILTER (WHERE is_archived)::int as archived,
COUNT(*) FILTER (WHERE is_mirror)::int as mirrors,
COALESCE(SUM(size), 0) as total_size,
COALESCE(SUM(num_stars), 0)::int as total_stars,
COALESCE(SUM(num_forks), 0)::int as total_forks,
COALESCE(SUM(num_issues - num_closed_issues), 0)::int as open_issues,
COALESCE(SUM(num_pulls - num_closed_pulls), 0)::int as open_prs
FROM repository");
if (repoStats != null)
{
dashboard.TotalRepos = (int)repoStats.total;
dashboard.PublicRepos = (int)repoStats.public_repos;
dashboard.PrivateRepos = (int)repoStats.private_repos;
dashboard.ForkedRepos = (int)repoStats.forked;
dashboard.ArchivedRepos = (int)repoStats.archived;
dashboard.MirrorRepos = (int)repoStats.mirrors;
dashboard.TotalSize = (long)repoStats.total_size;
dashboard.TotalStars = (int)repoStats.total_stars;
dashboard.TotalForks = (int)repoStats.total_forks;
dashboard.TotalOpenIssues = (int)repoStats.open_issues;
dashboard.TotalOpenPRs = (int)repoStats.open_prs;
}
// Actions stats
var actionStats = await connection.QuerySingleOrDefaultAsync<dynamic>(@"
SELECT
COUNT(*)::int as total,
COUNT(*) FILTER (WHERE TO_TIMESTAMP(created) >= NOW() - INTERVAL '1 day')::int as today,
COUNT(*) FILTER (WHERE TO_TIMESTAMP(created) >= NOW() - INTERVAL '7 days')::int as this_week,
COUNT(*) FILTER (WHERE status = 3)::int as successful,
COUNT(*) FILTER (WHERE status = 4)::int as failed,
COUNT(*) FILTER (WHERE status = 2)::int as running
FROM action_run");
if (actionStats != null)
{
dashboard.TotalRuns = (int)actionStats.total;
dashboard.RunsToday = (int)actionStats.today;
dashboard.RunsThisWeek = (int)actionStats.this_week;
dashboard.SuccessfulRuns = (int)actionStats.successful;
dashboard.FailedRunsCount = (int)actionStats.failed;
dashboard.RunningNow = (int)actionStats.running;
}
// Recently updated repos
dashboard.RecentlyUpdated = await GetRepositoriesAsync(connection, "ORDER BY r.updated_unix DESC LIMIT 5");
// Largest repos
dashboard.LargestRepos = await GetRepositoriesAsync(connection, "ORDER BY r.size DESC LIMIT 5");
// Recent action runs
dashboard.RecentRuns = await GetActionRunsAsync(connection, "ORDER BY ar.created DESC LIMIT 10");
// Failed runs
dashboard.FailedRuns = await GetActionRunsAsync(connection, "WHERE ar.status = 4 ORDER BY ar.created DESC LIMIT 5");
// Running now
dashboard.RunningRuns = await GetActionRunsAsync(connection, "WHERE ar.status = 2 ORDER BY ar.started LIMIT 10");
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching Forgejo dashboard");
}
return dashboard;
}
public async Task<List<ForgejoRepository>> GetAllRepositoriesAsync()
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
return await GetRepositoriesAsync(connection, "ORDER BY LOWER(u.name), LOWER(r.name)");
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching repositories");
return new List<ForgejoRepository>();
}
}
public async Task<List<ForgejoActionRun>> GetAllActionRunsAsync(int limit = 100)
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
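// limit is an int, so interpolating it into the LIMIT clause cannot inject SQL.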
return await GetActionRunsAsync(connection, $"ORDER BY ar.created DESC LIMIT {limit}");
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching action runs");
return new List<ForgejoActionRun>();
}
}
public async Task<List<ForgejoActionStats>> GetActionStatsAsync()
{
try
{
await using var connection = new NpgsqlConnection(_connectionString);
var stats = await connection.QueryAsync<ForgejoActionStats>(@"
SELECT
ar.workflow_id,
r.name as repo_name,
COUNT(*)::int as total_runs,
COUNT(*) FILTER (WHERE ar.status = 3)::int as successful,
COUNT(*) FILTER (WHERE ar.status = 4)::int as failed,
TO_TIMESTAMP(MAX(ar.created)) as last_run,
AVG(ar.stopped - ar.started) FILTER (WHERE ar.stopped > 0 AND ar.started > 0) as avg_duration_seconds
FROM action_run ar
JOIN repository r ON ar.repo_id = r.id
GROUP BY ar.workflow_id, r.name
ORDER BY total_runs DESC");
return stats.ToList();
}
catch (Exception ex)
{
_logger.LogError(ex, "Error fetching action stats");
return new List<ForgejoActionStats>();
}
}
private async Task<List<ForgejoRepository>> GetRepositoriesAsync(NpgsqlConnection connection, string orderClause)
{
var repos = await connection.QueryAsync<ForgejoRepository>($@"
SELECT
r.id,
u.name as owner_name,
r.name,
r.description,
r.is_private,
r.is_fork,
r.is_archived,
r.is_mirror,
r.num_stars,
r.num_forks,
r.num_watches,
r.num_issues,
r.num_closed_issues,
r.num_pulls,
r.num_closed_pulls,
r.size,
TO_TIMESTAMP(r.created_unix) as created_at,
TO_TIMESTAMP(r.updated_unix) as updated_at
FROM repository r
JOIN ""user"" u ON r.owner_id = u.id
{orderClause}");
return repos.ToList();
}
private async Task<List<ForgejoActionRun>> GetActionRunsAsync(NpgsqlConnection connection, string whereOrderClause)
{
var runs = await connection.QueryAsync<ForgejoActionRun>($@"
SELECT
ar.id,
ar.repo_id,
r.name as repo_name,
u.name as owner_name,
ar.workflow_id,
ar.""index"",
COALESCE(tu.name, '') as trigger_user,
ar.ref,
ar.commit_sha,
ar.event,
ar.title,
ar.status,
CASE WHEN ar.started > 0 THEN TO_TIMESTAMP(ar.started) ELSE NULL END as started,
CASE WHEN ar.stopped > 0 THEN TO_TIMESTAMP(ar.stopped) ELSE NULL END as stopped,
TO_TIMESTAMP(ar.created) as created,
TO_TIMESTAMP(ar.updated) as updated
FROM action_run ar
JOIN repository r ON ar.repo_id = r.id
JOIN ""user"" u ON r.owner_id = u.id
LEFT JOIN ""user"" tu ON ar.trigger_user_id = tu.id
{whereOrderClause}");
return runs.ToList();
}
}
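
For completeness, a minimal Program.cs wiring for these four services might look like the sketch below. The service lifetimes and the Razor Pages UI are assumptions; the configuration keys are the ones the constructors above read (ConnectionStrings:AzureStorage, ConnectionStrings:BackupDb, ConnectionStrings:ForgejoDb and Caddy:AdminUrl).

using PlanTempusAdmin.Services;

var builder = WebApplication.CreateBuilder(args);

builder.Services.AddRazorPages();                 // assumption: the admin UI is Razor Pages

// Lifetimes are a guess; the services only hold configuration-derived state.
builder.Services.AddSingleton<AzureStorageService>();
builder.Services.AddSingleton<BackupService>();
builder.Services.AddSingleton<ForgejoService>();

// CaddyService takes an HttpClient, so register it as a typed client.
builder.Services.AddHttpClient<CaddyService>();

var app = builder.Build();
app.MapRazorPages();
app.Run();

Pages or endpoints can then take any of these services via constructor injection and call, for example, GetDashboardAsync().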