mirror of
https://github.com/s-frick/effigenix.git
synced 2026-03-28 15:59:35 +01:00
fix(production): N+1-Query in traceForward durch Level-by-Level BFS ersetzen
traceForward aus BatchTraceabilityService in BatchRepository verschoben. Statt einer Query pro Knoten (N+1) wird jetzt eine IN(:parentIds)-Query pro Tiefenebene ausgeführt (max. maxDepth Queries statt N).
This commit is contained in:
parent
ddb674d618
commit
8948103957
6 changed files with 130 additions and 174 deletions
|
|
@ -31,5 +31,7 @@ public interface BatchRepository {
|
|||
|
||||
/**
 * Finds all batches that consumed the given batch as an input, i.e. the direct
 * children of {@code inputBatchId} in the forward trace graph (one step only).
 *
 * @param inputBatchId id of the consumed (parent) batch
 * @return the direct child batches (possibly empty), or a {@link RepositoryError} on failure
 */
Result<RepositoryError, List<Batch>> findByInputBatchId(BatchId inputBatchId);
|
||||
|
||||
/**
 * Traces all batches reachable from {@code startBatchId} by following consumptions
 * forward, breadth-first, up to {@code maxDepth} levels. Implementations are expected
 * to batch the lookups per depth level (one IN-query per level) rather than one query
 * per node, to avoid N+1 query behavior.
 *
 * @param startBatchId batch to start the forward trace from (not included in the result)
 * @param maxDepth     maximum trace depth; a value {@code <= 0} yields an empty result
 * @return the traced batches with their depth relative to the start batch,
 *         or a {@link RepositoryError} on failure
 */
Result<RepositoryError, List<TracedBatch>> traceForward(BatchId startBatchId, int maxDepth);
|
||||
|
||||
/**
 * Persists the given batch, including its consumptions.
 *
 * @param batch batch to store
 * @return success, or a {@link RepositoryError} on failure
 */
Result<RepositoryError, Void> save(Batch batch);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,11 +2,7 @@ package de.effigenix.domain.production;
|
|||
|
||||
import de.effigenix.shared.common.Result;
|
||||
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
public class BatchTraceabilityService {
|
||||
|
||||
|
|
@ -33,35 +29,15 @@ public class BatchTraceabilityService {
|
|||
}
|
||||
}
|
||||
|
||||
List<TracedBatch> result = new ArrayList<>();
|
||||
Set<String> visited = new HashSet<>();
|
||||
visited.add(startBatchId.value());
|
||||
|
||||
record BfsEntry(BatchId batchId, int depth) {}
|
||||
var queue = new ArrayDeque<BfsEntry>();
|
||||
queue.add(new BfsEntry(startBatchId, 0));
|
||||
|
||||
while (!queue.isEmpty()) {
|
||||
var entry = queue.poll();
|
||||
if (entry.depth() >= maxDepth) {
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (batchRepository.findByInputBatchId(entry.batchId())) {
|
||||
case Result.Failure(var err) ->
|
||||
{ return Result.failure(new BatchError.RepositoryFailure(err.message())); }
|
||||
case Result.Success(var children) -> {
|
||||
int childDepth = entry.depth() + 1;
|
||||
for (Batch child : children) {
|
||||
if (visited.add(child.id().value())) {
|
||||
result.add(new TracedBatch(child, childDepth));
|
||||
queue.add(new BfsEntry(child.id(), childDepth));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (maxDepth <= 0) {
|
||||
return Result.success(List.of());
|
||||
}
|
||||
|
||||
return Result.success(result);
|
||||
return switch (batchRepository.traceForward(startBatchId, maxDepth)) {
|
||||
case Result.Failure(var err) ->
|
||||
Result.failure(new BatchError.RepositoryFailure(err.message()));
|
||||
case Result.Success(var traced) ->
|
||||
Result.success(traced);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,8 @@ import java.sql.ResultSet;
|
|||
import java.sql.SQLException;
|
||||
import java.time.LocalDate;
|
||||
import java.time.OffsetDateTime;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
|
||||
|
|
@ -210,6 +212,44 @@ public class JdbcBatchRepository implements BatchRepository {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result<RepositoryError, List<TracedBatch>> traceForward(BatchId startBatchId, int maxDepth) {
|
||||
try {
|
||||
String startId = startBatchId.value();
|
||||
var results = new ArrayList<TracedBatch>();
|
||||
var visited = new HashSet<String>();
|
||||
visited.add(startId);
|
||||
|
||||
var currentLevel = List.of(startId);
|
||||
for (int depth = 1; depth <= maxDepth && !currentLevel.isEmpty(); depth++) {
|
||||
int currentDepth = depth;
|
||||
var children = jdbc.sql("""
|
||||
SELECT DISTINCT b.*
|
||||
FROM batches b
|
||||
JOIN batch_consumptions bc ON b.id = bc.batch_id
|
||||
WHERE bc.input_batch_id IN (:parentIds)
|
||||
""")
|
||||
.param("parentIds", currentLevel)
|
||||
.query(this::mapBatchRow)
|
||||
.list();
|
||||
|
||||
var nextLevel = new ArrayList<String>();
|
||||
for (Batch child : children) {
|
||||
if (visited.add(child.id().value())) {
|
||||
results.add(new TracedBatch(child, currentDepth));
|
||||
nextLevel.add(child.id().value());
|
||||
}
|
||||
}
|
||||
currentLevel = nextLevel;
|
||||
}
|
||||
|
||||
return Result.success(results);
|
||||
} catch (Exception e) {
|
||||
logger.trace("Database error in traceForward", e);
|
||||
return Result.failure(new RepositoryError.DatabaseError(e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result<RepositoryError, Void> save(Batch batch) {
|
||||
try {
|
||||
|
|
@ -294,7 +334,7 @@ public class JdbcBatchRepository implements BatchRepository {
|
|||
.query((rs, rowNum) -> rs.getString("id"))
|
||||
.list();
|
||||
|
||||
var existingIdSet = new java.util.HashSet<>(existingIds);
|
||||
var existingIdSet = new HashSet<>(existingIds);
|
||||
|
||||
for (Consumption c : batch.consumptions()) {
|
||||
if (!existingIdSet.contains(c.id().value())) {
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import de.effigenix.domain.production.BatchNumber;
|
|||
import de.effigenix.domain.production.BatchRepository;
|
||||
import de.effigenix.domain.production.BatchStatus;
|
||||
import de.effigenix.domain.production.RecipeId;
|
||||
import de.effigenix.domain.production.TracedBatch;
|
||||
import de.effigenix.shared.common.RepositoryError;
|
||||
import de.effigenix.shared.common.Result;
|
||||
import org.springframework.context.annotation.Profile;
|
||||
|
|
@ -77,6 +78,11 @@ public class StubBatchRepository implements BatchRepository {
|
|||
return Result.failure(STUB_ERROR);
|
||||
}
|
||||
|
||||
@Override
public Result<RepositoryError, List<TracedBatch>> traceForward(BatchId startBatchId, int maxDepth) {
    // Stub: always fails with the shared STUB_ERROR, consistent with the
    // other repository operations in this class.
    return Result.failure(STUB_ERROR);
}
|
||||
|
||||
@Override
|
||||
public Result<RepositoryError, Void> save(Batch batch) {
|
||||
return Result.failure(STUB_ERROR);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue