SQL query optimization; add server cache hard limit; wrap each cleanup block in its own try/catch

This commit is contained in:
Stanley Dimant
2022-08-16 12:11:36 +02:00
parent 1bd37ffe70
commit e83d668724
5 changed files with 91 additions and 39 deletions

View File

@@ -46,13 +46,13 @@ namespace MareSynchronosServer
filesOlderThanDays = 7; filesOlderThanDays = 7;
} }
using var scope = _services.CreateScope();
using var dbContext = scope.ServiceProvider.GetService<MareDbContext>()!;
_logger.LogInformation($"Cleaning up files older than {filesOlderThanDays} days"); _logger.LogInformation($"Cleaning up files older than {filesOlderThanDays} days");
try try
{ {
using var scope = _services.CreateScope();
using var dbContext = scope.ServiceProvider.GetService<MareDbContext>()!;
var prevTime = DateTime.Now.Subtract(TimeSpan.FromDays(filesOlderThanDays)); var prevTime = DateTime.Now.Subtract(TimeSpan.FromDays(filesOlderThanDays));
var allFiles = dbContext.Files.Where(f => f.Uploaded).ToList(); var allFiles = dbContext.Files.Where(f => f.Uploaded).ToList();
@@ -70,10 +70,47 @@ namespace MareSynchronosServer
MareMetrics.FilesTotalSize.Dec(fi.Length); MareMetrics.FilesTotalSize.Dec(fi.Length);
_logger.LogInformation("File outdated: " + fileName); _logger.LogInformation("File outdated: " + fileName);
dbContext.Files.Remove(file); dbContext.Files.Remove(file);
File.Delete(fileName); fi.Delete();
} }
} }
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error during file cleanup");
}
var cacheSizeLimitInGiB = _configuration.GetValue<double>("CacheSizeHardLimitInGiB", -1);
try
{
if (cacheSizeLimitInGiB > 0)
{
_logger.LogInformation("Cleaning up files beyond the cache size limit");
var allLocalFiles = Directory.EnumerateFiles(_configuration["CacheDirectory"]).Select(f => new FileInfo(f)).ToList().OrderBy(f => f.LastAccessTimeUtc).ToList();
var totalCacheSizeInBytes = allLocalFiles.Sum(s => s.Length);
long cacheSizeLimitInBytes = (long)(cacheSizeLimitInGiB * 1024 * 1024 * 1024);
HashSet<string> removedHashes = new();
while (totalCacheSizeInBytes > cacheSizeLimitInBytes && allLocalFiles.Any())
{
var oldestFile = allLocalFiles.First();
removedHashes.Add(oldestFile.Name.ToLower());
allLocalFiles.Remove(oldestFile);
totalCacheSizeInBytes -= oldestFile.Length;
MareMetrics.FilesTotal.Dec();
MareMetrics.FilesTotalSize.Dec(oldestFile.Length);
oldestFile.Delete();
}
dbContext.Files.RemoveRange(dbContext.Files.Where(f => removedHashes.Contains(f.Hash.ToLower())));
}
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error during cache size limit cleanup");
}
try
{
_logger.LogInformation($"Cleaning up expired lodestone authentications"); _logger.LogInformation($"Cleaning up expired lodestone authentications");
var lodestoneAuths = dbContext.LodeStoneAuth.Include(u => u.User).Where(a => a.StartedAt != null).ToList(); var lodestoneAuths = dbContext.LodeStoneAuth.Include(u => u.User).Where(a => a.StartedAt != null).ToList();
List<LodeStoneAuth> expiredAuths = new List<LodeStoneAuth>(); List<LodeStoneAuth> expiredAuths = new List<LodeStoneAuth>();
@@ -85,10 +122,16 @@ namespace MareSynchronosServer
} }
} }
dbContext.RemoveRange(expiredAuths);
dbContext.RemoveRange(expiredAuths.Select(a => a.User)); dbContext.RemoveRange(expiredAuths.Select(a => a.User));
dbContext.RemoveRange(expiredAuths);
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error during expired auths cleanup");
}
try
{
if (!bool.TryParse(_configuration["PurgeUnusedAccounts"], out var purgeUnusedAccounts)) if (!bool.TryParse(_configuration["PurgeUnusedAccounts"], out var purgeUnusedAccounts))
{ {
purgeUnusedAccounts = false; purgeUnusedAccounts = false;
@@ -121,16 +164,18 @@ namespace MareSynchronosServer
} }
_logger.LogInformation("Cleaning up unauthorized users"); _logger.LogInformation("Cleaning up unauthorized users");
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Error during user purge");
}
SecretKeyAuthenticationHandler.ClearUnauthorizedUsers(); SecretKeyAuthenticationHandler.ClearUnauthorizedUsers();
_logger.LogInformation($"Cleanup complete"); _logger.LogInformation($"Cleanup complete");
dbContext.SaveChanges(); dbContext.SaveChanges();
} }
catch
{
}
}
public static void PurgeUser(User user, MareDbContext dbContext, IConfiguration _configuration) public static void PurgeUser(User user, MareDbContext dbContext, IConfiguration _configuration)
{ {

View File

@@ -106,20 +106,21 @@ namespace MareSynchronosServer.Hubs
{ {
var userSentHashes = new HashSet<string>(fileListHashes.Distinct()); var userSentHashes = new HashSet<string>(fileListHashes.Distinct());
_logger.LogInformation($"User {AuthenticatedUserId} sending files: {userSentHashes.Count}"); _logger.LogInformation($"User {AuthenticatedUserId} sending files: {userSentHashes.Count}");
var coveredFiles = new Dictionary<string, UploadFileDto>(); var notCoveredFiles = new Dictionary<string, UploadFileDto>();
// Todo: Check if a select can directly transform to hashset // Todo: Check if a select can directly transform to hashset
var forbiddenFiles = await _dbContext.ForbiddenUploadEntries.AsNoTracking().Where(f => userSentHashes.Contains(f.Hash)).ToDictionaryAsync(f => f.Hash, f => f); var forbiddenFiles = await _dbContext.ForbiddenUploadEntries.AsNoTracking().Where(f => userSentHashes.Contains(f.Hash)).ToDictionaryAsync(f => f.Hash, f => f);
var existingFiles = await _dbContext.Files.AsNoTracking().Where(f => userSentHashes.Contains(f.Hash)).ToDictionaryAsync(f => f.Hash, f => f); var existingFiles = await _dbContext.Files.AsNoTracking().Where(f => userSentHashes.Contains(f.Hash)).ToDictionaryAsync(f => f.Hash, f => f);
var uploader = await _dbContext.Users.SingleAsync(u => u.UID == AuthenticatedUserId); var uploader = await _dbContext.Users.SingleAsync(u => u.UID == AuthenticatedUserId);
List<FileCache> fileCachesToUpload = new();
foreach (var file in userSentHashes) foreach (var file in userSentHashes)
{ {
// Skip empty file hashes, duplicate file hashes, forbidden file hashes and existing file hashes // Skip empty file hashes, duplicate file hashes, forbidden file hashes and existing file hashes
if (string.IsNullOrEmpty(file)) { continue; } if (string.IsNullOrEmpty(file)) { continue; }
if (coveredFiles.ContainsKey(file)) { continue; } if (notCoveredFiles.ContainsKey(file)) { continue; }
if (forbiddenFiles.ContainsKey(file)) if (forbiddenFiles.ContainsKey(file))
{ {
coveredFiles[file] = new UploadFileDto() notCoveredFiles[file] = new UploadFileDto()
{ {
ForbiddenBy = forbiddenFiles[file].ForbiddenBy, ForbiddenBy = forbiddenFiles[file].ForbiddenBy,
Hash = file, Hash = file,
@@ -132,21 +133,22 @@ namespace MareSynchronosServer.Hubs
_logger.LogInformation("User " + AuthenticatedUserId + " needs upload: " + file); _logger.LogInformation("User " + AuthenticatedUserId + " needs upload: " + file);
var userId = AuthenticatedUserId; var userId = AuthenticatedUserId;
await _dbContext.Files.AddAsync(new FileCache() fileCachesToUpload.Add(new FileCache()
{ {
Hash = file, Hash = file,
Uploaded = false, Uploaded = false,
Uploader = uploader Uploader = uploader
}); });
coveredFiles[file] = new UploadFileDto() notCoveredFiles[file] = new UploadFileDto()
{ {
Hash = file, Hash = file,
}; };
} }
//Save bulk //Save bulk
await _dbContext.Files.AddRangeAsync(fileCachesToUpload);
await _dbContext.SaveChangesAsync(); await _dbContext.SaveChangesAsync();
return coveredFiles.Values.ToList(); return notCoveredFiles.Values.ToList();
} }
[Authorize(AuthenticationSchemes = SecretKeyAuthenticationHandler.AuthScheme)] [Authorize(AuthenticationSchemes = SecretKeyAuthenticationHandler.AuthScheme)]

View File

@@ -118,25 +118,33 @@ namespace MareSynchronosServer.Hubs
_logger.LogInformation("User " + AuthenticatedUserId + " pushing character data to " + visibleCharacterIds.Count + " visible clients"); _logger.LogInformation("User " + AuthenticatedUserId + " pushing character data to " + visibleCharacterIds.Count + " visible clients");
var user = await GetAuthenticatedUserUntrackedAsync(); var user = await GetAuthenticatedUserUntrackedAsync();
var senderPairedUsers = await _dbContext.ClientPairs.AsNoTracking()
.Include(w => w.User)
.Include(w => w.OtherUser)
.Where(w => w.User.UID == user.UID && !w.IsPaused
&& visibleCharacterIds.Contains(w.OtherUser.CharacterIdentification))
.Select(u => u.OtherUser).ToListAsync();
foreach (var pairedUser in senderPairedUsers) var query =
from userToOther in _dbContext.ClientPairs
join otherToUser in _dbContext.ClientPairs
on new
{ {
var isPaused = (await _dbContext.ClientPairs.AsNoTracking() user = userToOther.UserUID,
.FirstOrDefaultAsync(w => other = userToOther.OtherUserUID
w.User.UID == pairedUser.UID && w.OtherUser.UID == user.UID))?.IsPaused ?? true;
if (isPaused) continue; } equals new
await Clients.User(pairedUser.UID).SendAsync(Api.OnUserReceiveCharacterData, characterCache, {
user.CharacterIdentification); user = otherToUser.OtherUserUID,
other = otherToUser.UserUID
} }
where
userToOther.UserUID == user.UID
&& !userToOther.IsPaused
&& !otherToUser.IsPaused
&& visibleCharacterIds.Contains(userToOther.OtherUser.CharacterIdentification)
select otherToUser.UserUID;
var otherEntries = await query.ToListAsync();
await Clients.Users(otherEntries).SendAsync(Api.OnUserReceiveCharacterData, characterCache, user.CharacterIdentification);
MareMetrics.UserPushData.Inc(); MareMetrics.UserPushData.Inc();
MareMetrics.UserPushDataTo.Inc(visibleCharacterIds.Count); MareMetrics.UserPushDataTo.Inc(otherEntries.Count);
} }
[Authorize(AuthenticationSchemes = SecretKeyAuthenticationHandler.AuthScheme)] [Authorize(AuthenticationSchemes = SecretKeyAuthenticationHandler.AuthScheme)]

View File

@@ -101,13 +101,9 @@ namespace MareSynchronosServer
app.UseHttpLogging(); app.UseHttpLogging();
app.UseRouting(); app.UseRouting();
var webSocketOptions = new WebSocketOptions
{
KeepAliveInterval = TimeSpan.FromSeconds(10),
};
app.UseHttpMetrics(); app.UseHttpMetrics();
app.UseWebSockets(webSocketOptions); app.UseWebSockets();
app.UseAuthentication(); app.UseAuthentication();
app.UseAuthorization(); app.UseAuthorization();

View File

@@ -31,6 +31,7 @@
"PurgeUnusedAccounts": true, "PurgeUnusedAccounts": true,
"PurgeUnusedAccountsPeriodInDays": 14, "PurgeUnusedAccountsPeriodInDays": 14,
"CacheDirectory": "G:\\ServerTest", // do not delete this key and set it to the path where the files will be stored "CacheDirectory": "G:\\ServerTest", // do not delete this key and set it to the path where the files will be stored
"CacheSizeHardLimitInGiB": -1,
"AllowedHosts": "*", "AllowedHosts": "*",
"Kestrel": { "Kestrel": {
"Endpoints": { "Endpoints": {