give me the proper end to end working code for thi...
Créé le : 17 juillet 2025
Créé le : 17 juillet 2025
give me the proper end to end working code for this usecase
-given code base i already have and we need to integrate the given usecase with this code
-give me proper end-to-end code for this.
and everything i can copy paste and workable
give me all the details: how to set it up, which technologies and dependencies I need to add, and every other detail I want
compatible with the .net 9 and c#
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using TestLogAnalyzer.Services;
namespace TestLogAnalyzer
{
    /// <summary>
    /// Console entry point: builds configuration, wires up dependency injection and
    /// logging, runs the test-log analysis pipeline once, then shuts the host down.
    /// </summary>
    class Program
    {
        static async Task Main(string[] args)
        {
            // Build configuration from appsettings.json in the working directory.
            var configuration = new ConfigurationBuilder()
                .SetBasePath(Directory.GetCurrentDirectory())
                .AddJsonFile("appsettings.json", optional: false, reloadOnChange: true)
                .Build();

            // Setup dependency injection.
            var host = Host.CreateDefaultBuilder(args)
                .ConfigureServices((context, services) =>
                {
                    services.AddSingleton<IConfiguration>(configuration);
                    services.AddScoped<IDatabaseService, DatabaseService>();
                    services.AddScoped<IOllamaService, OllamaService>();
                    services.AddScoped<ITestLogAnalyzerService, TestLogAnalyzerService>();
                    services.AddLogging(builder =>
                    {
                        builder.AddConsole();
                        builder.AddDebug();
                    });
                })
                .Build();

            var logger = host.Services.GetRequiredService<ILogger<Program>>();

            // Resolve scoped services from an explicit scope rather than the root
            // provider, so scope validation passes and disposal is deterministic.
            using var scope = host.Services.CreateScope();
            var analyzer = scope.ServiceProvider.GetRequiredService<ITestLogAnalyzerService>();

            try
            {
                logger.LogInformation("Starting Test Log Analyzer...");

                // Process all failed test cases
                await analyzer.ProcessFailedTestCasesAsync();
                //await analyzer.ProcessSingleTestCaseAsync("TC001_DatabaseConnection");

                logger.LogInformation("Test Log Analysis completed successfully!");
            }
            catch (Exception ex)
            {
                logger.LogError(ex, "An error occurred during test log analysis");
            }
            finally
            {
                await host.StopAsync();
            }
        }
    }
}
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using TestLogAnalyzer.Models;
namespace TestLogAnalyzer.Services
{
/// <summary>
/// Orchestrates the analysis pipeline: fetches failed test cases, has them
/// classified, and persists the classification results.
/// </summary>
public interface ITestLogAnalyzerService
{
/// <summary>Classifies every unprocessed failed test case and stores the results.</summary>
Task ProcessFailedTestCasesAsync();
/// <summary>Classifies one unprocessed failed test case by id and returns the result.</summary>
Task<ClassificationResult> ProcessSingleTestCaseAsync(string testCaseId);
}
/// <summary>
/// Coordinates the end-to-end analysis: pulls unprocessed failed tests from the
/// database, asks the Ollama service to classify each failure using historical
/// context, and writes the classification back to the database.
/// </summary>
public class TestLogAnalyzerService : ITestLogAnalyzerService
{
    private readonly IDatabaseService _databaseService;
    private readonly IOllamaService _ollamaService;
    private readonly ILogger<TestLogAnalyzerService> _logger;
    private readonly IConfiguration _configuration;

    public TestLogAnalyzerService(
        IDatabaseService databaseService,
        IOllamaService ollamaService,
        ILogger<TestLogAnalyzerService> logger,
        IConfiguration configuration)
    {
        _databaseService = databaseService;
        _ollamaService = ollamaService;
        _logger = logger;
        _configuration = configuration;
    }

    public async Task ProcessFailedTestCasesAsync()
    {
        try
        {
            _logger.LogInformation("Starting test log analysis process...");

            await ValidateConnectionsAsync();

            // Get all unprocessed failed test cases.
            var failedTestCases = await _databaseService.GetUnprocessedFailedTestCasesAsync();
            if (!failedTestCases.Any())
            {
                _logger.LogInformation("No unprocessed failed test cases found.");
                return;
            }

            _logger.LogInformation("Found {Count} failed test cases to process", failedTestCases.Count);

            // Was hard-coded to 200 ms; honour the configured throttle when present.
            var delayMs = int.TryParse(_configuration["Analysis:DelayBetweenRequests"], out var configuredDelay)
                ? configuredDelay
                : 200;

            var processedCount = 0;
            var errorCount = 0;

            foreach (var testCase in failedTestCases)
            {
                try
                {
                    _logger.LogInformation("Processing test case: {TestCaseId}", testCase.TestCaseId);

                    // Get historical context for this test case.
                    var historicalData = await _databaseService.GetHistoricalDataAsync(testCase.TestCaseId);
                    var context = new HistoricalContext
                    {
                        TestCaseId = testCase.TestCaseId,
                        CurrentResult = testCase,
                        PreviousResults = historicalData
                    };

                    // Classify.
                    var classification = await _ollamaService.ClassifyTestFailureAsync(context);

                    // Update DB with results.
                    await _databaseService.UpdateClassificationAsync(
                        testCase.Id, classification.Classification, classification.ConfidenceScore);

                    processedCount++;
                    _logger.LogInformation("✓ {TestCaseId}: {Classification} ({Confidence:P})",
                        testCase.TestCaseId, classification.Classification, classification.ConfidenceScore);

                    // Throttle so the local model is not flooded with requests.
                    await Task.Delay(delayMs);
                }
                catch (Exception ex)
                {
                    // One bad test case must not abort the whole batch.
                    errorCount++;
                    _logger.LogError(ex, "Error processing test case {TestCaseId}", testCase.TestCaseId);
                }
            }

            _logger.LogInformation("Processing completed: {Processed} successful, {Errors} errors",
                processedCount, errorCount);
            await LogSummaryAsync(failedTestCases);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error in ProcessFailedTestCasesAsync");
            throw;
        }
    }

    public async Task<ClassificationResult> ProcessSingleTestCaseAsync(string testCaseId)
    {
        try
        {
            _logger.LogInformation("Processing single test case: {TestCaseId}", testCaseId);

            var failedTestCases = await _databaseService.GetUnprocessedFailedTestCasesAsync();
            var testCase = failedTestCases.FirstOrDefault(tc => tc.TestCaseId == testCaseId);
            if (testCase == null)
            {
                throw new ArgumentException($"Test case {testCaseId} not found or not in failed state");
            }

            var historicalData = await _databaseService.GetHistoricalDataAsync(testCaseId);
            var context = new HistoricalContext
            {
                TestCaseId = testCaseId,
                CurrentResult = testCase,
                PreviousResults = historicalData
            };

            var classification = await _ollamaService.ClassifyTestFailureAsync(context);

            await _databaseService.UpdateClassificationAsync(
                testCase.Id, classification.Classification, classification.ConfidenceScore);

            _logger.LogInformation("Single test case processed: {TestCaseId} -> {Classification} ({Confidence:P})",
                testCaseId, classification.Classification, classification.ConfidenceScore);
            return classification;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error processing single test case {TestCaseId}", testCaseId);
            throw;
        }
    }

    /// <summary>Fails fast when either the database or Ollama is unreachable.</summary>
    private async Task ValidateConnectionsAsync()
    {
        _logger.LogInformation("Validating connections...");

        var dbConnected = await _databaseService.TestConnectionAsync();
        if (!dbConnected)
        {
            throw new InvalidOperationException("Database connection failed");
        }

        var ollamaConnected = await _ollamaService.TestConnectionAsync();
        if (!ollamaConnected)
        {
            throw new InvalidOperationException("Ollama connection failed. Make sure Ollama is running locally.");
        }

        _logger.LogInformation("All connections validated successfully");
    }

    /// <summary>Logs per-classification counts and the average confidence for the run.</summary>
    private Task LogSummaryAsync(List<TestResult> processedTestCases)
    {
        try
        {
            var summary = new Dictionary<string, int>();
            var confidenceStats = new List<double>();

            foreach (var testCase in processedTestCases)
            {
                if (!string.IsNullOrEmpty(testCase.Classification))
                {
                    summary[testCase.Classification] = summary.GetValueOrDefault(testCase.Classification, 0) + 1;
                    if (testCase.ConfidenceScore.HasValue)
                    {
                        confidenceStats.Add(testCase.ConfidenceScore.Value);
                    }
                }
            }

            _logger.LogInformation("=== ANALYSIS SUMMARY ===");
            foreach (var item in summary)
            {
                _logger.LogInformation("{Classification}: {Count} test cases", item.Key, item.Value);
            }

            if (confidenceStats.Any())
            {
                var avgConfidence = confidenceStats.Average();
                _logger.LogInformation("Average Confidence Score: {AvgConfidence:P2}", avgConfidence);
            }

            _logger.LogInformation("======================");
        }
        catch (Exception ex)
        {
            // Summary generation is best-effort; never fail the run because of it.
            _logger.LogWarning(ex, "Error generating summary");
        }

        // No awaits here; return a completed task instead of declaring the method
        // async (avoids the CS1998 "async method lacks await" warning).
        return Task.CompletedTask;
    }
}
}
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using System.Text;
using System.Text.Json;
using TestLogAnalyzer.Models;
namespace TestLogAnalyzer.Services
{
/// <summary>
/// Client for a locally running Ollama instance used to classify test failures.
/// </summary>
public interface IOllamaService
{
/// <summary>Classifies the current failure using its historical context.</summary>
Task<ClassificationResult> ClassifyTestFailureAsync(HistoricalContext context);
/// <summary>Returns true when the Ollama HTTP endpoint is reachable.</summary>
Task<bool> TestConnectionAsync();
}
/// <summary>
/// Talks to a locally running Ollama instance: builds the classification prompt,
/// calls /api/generate, and parses the model's structured reply.
/// </summary>
public class OllamaService : IOllamaService, IDisposable
{
    private readonly HttpClient _httpClient;
    private readonly ILogger<OllamaService> _logger;
    private readonly string _ollamaUrl;
    private readonly string _modelName;

    public OllamaService(IConfiguration configuration, ILogger<OllamaService> logger)
    {
        _httpClient = new HttpClient();
        _logger = logger;
        _ollamaUrl = configuration["Ollama:Url"] ?? "http://localhost:11434";
        _modelName = configuration["Ollama:ModelName"] ?? "gemma2:2b";
        // Local LLM inference can be slow; allow generous time per request.
        _httpClient.Timeout = TimeSpan.FromMinutes(5);
    }

    /// <summary>Returns true when /api/tags answers with a success status.</summary>
    public async Task<bool> TestConnectionAsync()
    {
        try
        {
            var response = await _httpClient.GetAsync($"{_ollamaUrl}/api/tags");
            var isConnected = response.IsSuccessStatusCode;
            if (isConnected)
                _logger.LogInformation("Ollama connection successful");
            else
                _logger.LogWarning("Ollama connection failed");
            return isConnected;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error testing Ollama connection");
            return false;
        }
    }

    public async Task<ClassificationResult> ClassifyTestFailureAsync(HistoricalContext context)
    {
        try
        {
            var prompt = BuildPrompt(context);
            _logger.LogDebug("Sending prompt to Ollama for test case: {TestCaseId}", context.TestCaseId);

            var request = new OllamaRequest
            {
                Model = _modelName,
                Prompt = prompt,
                Stream = false,
                Options = new OllamaOptions
                {
                    Temperature = 0.1,
                    NumCtx = 4096,
                    NumPredict = 500
                }
            };

            var jsonContent = JsonSerializer.Serialize(request);
            var content = new StringContent(jsonContent, Encoding.UTF8, "application/json");

            var response = await _httpClient.PostAsync($"{_ollamaUrl}/api/generate", content);
            if (!response.IsSuccessStatusCode)
            {
                throw new InvalidOperationException($"Ollama API returned status: {response.StatusCode}");
            }

            var responseContent = await response.Content.ReadAsStringAsync();
            var ollamaResponse = JsonSerializer.Deserialize<OllamaResponse>(responseContent);
            if (ollamaResponse?.Response == null)
            {
                throw new InvalidOperationException("Invalid response from Ollama API");
            }

            var result = ParseClassificationResponse(ollamaResponse.Response);
            _logger.LogDebug("Classification result for {TestCaseId}: {Classification} ({Confidence:P})",
                context.TestCaseId, result.Classification, result.ConfidenceScore);
            return result;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error classifying test failure for {TestCaseId}", context.TestCaseId);
            // Fail soft: return a low-confidence default so the batch keeps going.
            return new ClassificationResult
            {
                Classification = "Test Code Issue",
                ConfidenceScore = 0.0,
                Reasoning = "Error occurred during classification"
            };
        }
    }

    /// <summary>Builds the classification prompt, including up to 10 historical runs.</summary>
    private string BuildPrompt(HistoricalContext context)
    {
        var sb = new StringBuilder();
        sb.AppendLine("You are an expert test automation engineer. Analyze test failures and classify them into exactly one of these categories:");
        sb.AppendLine("1. 'Product Issue' - Application/system bugs, functionality problems, environmental issues");
        sb.AppendLine("2. 'Test Code Issue' - Test automation problems, flaky tests, test data issues, infrastructure problems");
        sb.AppendLine();
        sb.AppendLine($"CURRENT TEST CASE: {context.TestCaseId}");
        sb.AppendLine($"CURRENT FAILURE: {context.CurrentResult.ExceptionMessage}");
        sb.AppendLine();

        if (context.PreviousResults.Any())
        {
            sb.AppendLine("HISTORICAL CONTEXT (Last 20 days):");
            foreach (var historical in context.PreviousResults.Take(10)) // Limit to prevent prompt overflow
            {
                var status = historical.Outcome ? "PASSED" : "FAILED";
                sb.AppendLine($"- {historical.DaysAgo} days ago: {status}");
                if (!historical.Outcome && !string.IsNullOrEmpty(historical.ExceptionMessage))
                {
                    // Truncate long exception messages to keep the prompt compact.
                    sb.AppendLine($"  Exception: {historical.ExceptionMessage.Substring(0, Math.Min(200, historical.ExceptionMessage.Length))}...");
                }
            }
            sb.AppendLine();

            var failureCount = context.PreviousResults.Count(r => !r.Outcome);
            var successCount = context.PreviousResults.Count(r => r.Outcome);
            sb.AppendLine($"PATTERN ANALYSIS: {failureCount} failures, {successCount} successes in last 20 days");
        }
        else
        {
            sb.AppendLine("HISTORICAL CONTEXT: No previous data available (new test case)");
        }

        sb.AppendLine();
        sb.AppendLine("ANALYSIS GUIDELINES:");
        sb.AppendLine("- Product Issue: Database errors, API failures, UI element not found, timeout issues, assertion failures on expected behavior");
        sb.AppendLine("- Test Code Issue: Test setup problems, test data issues, flaky selectors, race conditions, test infrastructure failures");
        sb.AppendLine("- Consider frequency: Consistent failures often indicate product issues, intermittent failures suggest test code issues");
        sb.AppendLine("- Consider error messages: Look for patterns in exception types and messages");
        sb.AppendLine();
        sb.AppendLine("RESPOND IN EXACTLY THIS FORMAT:");
        sb.AppendLine("CLASSIFICATION: [Product Issue OR Test Code Issue]");
        sb.AppendLine("CONFIDENCE: [0.0 to 1.0]");
        sb.AppendLine("REASONING: [Brief explanation of your decision]");
        return sb.ToString();
    }

    /// <summary>
    /// Parses the model's "CLASSIFICATION / CONFIDENCE / REASONING" reply.
    /// Falls back to a neutral default when the reply does not match the format.
    /// </summary>
    private ClassificationResult ParseClassificationResponse(string response)
    {
        var result = new ClassificationResult
        {
            Classification = "Test Code Issue", // Default
            ConfidenceScore = 0.5,
            Reasoning = "Could not parse response"
        };

        try
        {
            var lines = response.Split('\n', StringSplitOptions.RemoveEmptyEntries);
            foreach (var line in lines)
            {
                if (line.StartsWith("CLASSIFICATION:", StringComparison.OrdinalIgnoreCase))
                {
                    var classification = line.Substring("CLASSIFICATION:".Length).Trim();
                    if (classification.Contains("Product Issue", StringComparison.OrdinalIgnoreCase))
                    {
                        result.Classification = "Product Issue";
                    }
                    else if (classification.Contains("Test Code Issue", StringComparison.OrdinalIgnoreCase))
                    {
                        result.Classification = "Test Code Issue";
                    }
                }
                else if (line.StartsWith("CONFIDENCE:", StringComparison.OrdinalIgnoreCase))
                {
                    var confidenceStr = line.Substring("CONFIDENCE:".Length).Trim();
                    // Parse invariantly so "0.8" is read correctly even under
                    // comma-decimal OS locales.
                    if (double.TryParse(confidenceStr, System.Globalization.NumberStyles.Float,
                            System.Globalization.CultureInfo.InvariantCulture, out double confidence))
                    {
                        result.ConfidenceScore = Math.Max(0.0, Math.Min(1.0, confidence));
                    }
                }
                else if (line.StartsWith("REASONING:", StringComparison.OrdinalIgnoreCase))
                {
                    result.Reasoning = line.Substring("REASONING:".Length).Trim();
                }
            }
        }
        catch (Exception ex)
        {
            _logger.LogWarning(ex, "Error parsing classification response");
        }

        return result;
    }

    /// <summary>
    /// Disposes the owned HttpClient. The class now implements IDisposable so the
    /// DI container actually calls this (previously the method existed but was
    /// never invoked, leaking the underlying sockets).
    /// </summary>
    public void Dispose()
    {
        _httpClient?.Dispose();
    }
}
}
using System.Data;
using Microsoft.Data.SqlClient;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using TestLogAnalyzer.Models;
namespace TestLogAnalyzer.Services
{
/// <summary>
/// SQL Server access for test results: reads unprocessed failures and their
/// history, and writes classification results back.
/// </summary>
public interface IDatabaseService
{
/// <summary>Returns failed test results that have no classification yet.</summary>
Task<List<TestResult>> GetUnprocessedFailedTestCasesAsync();
/// <summary>Returns prior runs of a test case within the given look-back window.</summary>
Task<List<HistoricalTestResult>> GetHistoricalDataAsync(string testCaseId, int daysBack = 20);
/// <summary>Stores the classification and confidence for one test result row.</summary>
Task UpdateClassificationAsync(int testResultId, string classification, double confidenceScore);
/// <summary>Returns true when a database connection can be opened.</summary>
Task<bool> TestConnectionAsync();
}
/// <summary>
/// SQL Server implementation of IDatabaseService. All queries are parameterized;
/// connections are short-lived (open per call, disposed via using declarations).
/// </summary>
public class DatabaseService : IDatabaseService
{
    private readonly string _connectionString;
    private readonly ILogger<DatabaseService> _logger;

    public DatabaseService(IConfiguration configuration, ILogger<DatabaseService> logger)
    {
        _connectionString = configuration.GetConnectionString("DefaultConnection")
            ?? throw new InvalidOperationException("Database connection string not found");
        _logger = logger;
    }

    public async Task<bool> TestConnectionAsync()
    {
        try
        {
            using var connection = new SqlConnection(_connectionString);
            await connection.OpenAsync();
            _logger.LogInformation("Database connection successful");
            return true;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Database connection failed");
            return false;
        }
    }

    public async Task<List<TestResult>> GetUnprocessedFailedTestCasesAsync()
    {
        var results = new List<TestResult>();
        try
        {
            using var connection = new SqlConnection(_connectionString);
            await connection.OpenAsync();

            // Failed (Outcome = 0) rows from the last day that are not yet classified.
            var query = @"
                SELECT Id, TestCaseId, ExecutionDate, Outcome, ExceptionMessage,
                       Classification, ConfidenceScore, ProcessedDate
                FROM TestResults
                WHERE Outcome = 0
                  AND (Classification IS NULL OR ProcessedDate IS NULL)
                  AND ExecutionDate >= DATEADD(day, -1, GETDATE())
                ORDER BY ExecutionDate DESC";

            using var command = new SqlCommand(query, connection);
            using var reader = await command.ExecuteReaderAsync();
            while (await reader.ReadAsync())
            {
                results.Add(new TestResult
                {
                    Id = reader.GetInt32("Id"),
                    TestCaseId = reader.GetString("TestCaseId"),
                    ExecutionDate = reader.GetDateTime("ExecutionDate"),
                    Outcome = reader.GetBoolean("Outcome"),
                    // ExceptionMessage can be NULL (the historical query below
                    // already guards it); GetString on NULL would throw here too.
                    ExceptionMessage = reader.IsDBNull("ExceptionMessage") ? null : reader.GetString("ExceptionMessage"),
                    Classification = reader.IsDBNull("Classification") ? null : reader.GetString("Classification"),
                    ConfidenceScore = reader.IsDBNull("ConfidenceScore") ? null : reader.GetDouble("ConfidenceScore"),
                    ProcessedDate = reader.IsDBNull("ProcessedDate") ? null : reader.GetDateTime("ProcessedDate")
                });
            }

            _logger.LogInformation("Retrieved {Count} unprocessed failed test cases", results.Count);
            return results;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error retrieving unprocessed failed test cases");
            throw;
        }
    }

    public async Task<List<HistoricalTestResult>> GetHistoricalDataAsync(string testCaseId, int daysBack = 20)
    {
        var results = new List<HistoricalTestResult>();
        try
        {
            using var connection = new SqlConnection(_connectionString);
            await connection.OpenAsync();

            // Runs strictly older than the current-day window, within daysBack days.
            var query = @"
                SELECT ExecutionDate, Outcome, ExceptionMessage,
                       DATEDIFF(day, ExecutionDate, GETDATE()) as DaysAgo
                FROM TestResults
                WHERE TestCaseId = @TestCaseId
                  AND ExecutionDate >= DATEADD(day, -@DaysBack, GETDATE())
                  AND ExecutionDate < DATEADD(day, -1, GETDATE())
                ORDER BY ExecutionDate DESC";

            using var command = new SqlCommand(query, connection);
            command.Parameters.AddWithValue("@TestCaseId", testCaseId);
            command.Parameters.AddWithValue("@DaysBack", daysBack);

            using var reader = await command.ExecuteReaderAsync();
            while (await reader.ReadAsync())
            {
                results.Add(new HistoricalTestResult
                {
                    ExecutionDate = reader.GetDateTime("ExecutionDate"),
                    Outcome = reader.GetBoolean("Outcome"),
                    ExceptionMessage = reader.IsDBNull("ExceptionMessage") ? null : reader.GetString("ExceptionMessage"),
                    DaysAgo = reader.GetInt32("DaysAgo")
                });
            }

            _logger.LogDebug("Retrieved {Count} historical records for test case {TestCaseId}",
                results.Count, testCaseId);
            return results;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error retrieving historical data for test case {TestCaseId}", testCaseId);
            throw;
        }
    }

    public async Task UpdateClassificationAsync(int testResultId, string classification, double confidenceScore)
    {
        try
        {
            using var connection = new SqlConnection(_connectionString);
            await connection.OpenAsync();

            var query = @"
                UPDATE TestResults
                SET Classification = @Classification,
                    ConfidenceScore = @ConfidenceScore,
                    ProcessedDate = GETDATE()
                WHERE Id = @Id";

            using var command = new SqlCommand(query, connection);
            command.Parameters.AddWithValue("@Id", testResultId);
            command.Parameters.AddWithValue("@Classification", classification);
            command.Parameters.AddWithValue("@ConfidenceScore", confidenceScore);

            await command.ExecuteNonQueryAsync();
            _logger.LogDebug("Updated classification for test result ID {TestResultId}", testResultId);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error updating classification for test result ID {TestResultId}", testResultId);
            throw;
        }
    }
}
}
{
"ConnectionStrings": {
"DefaultConnection": "Server=XYZ;Database=TestResults;Trusted_Connection=true;TrustServerCertificate=true;"
},
"Ollama": {
"Url": "http://localhost:11434",
"ModelName": "gemma3:latest"
},
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft": "Warning",
"Microsoft.Hosting.Lifetime": "Information"
}
},
"Analysis": {
"HistoricalDays": 20,
"BatchSize": 10,
"DelayBetweenRequests": 100
}
}
Based on your existing code, you have:
text┌─────────────────────────────────────────────────────────────────────────────┐ │ RAG-Enhanced Test Log Analyzer │ ├─────────────────────────────────────────────────────────────────────────────┤ │ 1. Data Layer │ │ ├── SQL Server (Primary Storage) │ │ │ ├── TestResults (Enhanced with embedding flags) │ │ │ └── ProcessingLogs (Performance tracking) │ │ └── ChromaDB (Vector Database) │ │ ├── Collections: test_exceptions, historical_patterns │ │ └── Metadata: testcase_id, date, classification, confidence │ ├─────────────────────────────────────────────────────────────────────────────┤ │ 2. AI/ML Layer │ │ ├── Ollama Models (Local) │ │ │ ├── nomic-embed-text (384-dim embeddings) │ │ │ └── gemma3:latest (classification with enhanced context) │ │ └── RAG Pipeline │ │ ├── Embedding Generation Service │ │ ├── Semantic Search Service │ │ └── Context Augmentation Service │ ├─────────────────────────────────────────────────────────────────────────────┤ │ 3. Enhanced Services Layer │ │ ├── DatabaseService (Enhanced) │ │ ├── ChromaService (NEW) │ │ ├── EmbeddingService (NEW) │ │ ├── RAGService (NEW) │ │ ├── OllamaService (Enhanced) │ │ └── TestLogAnalyzerService (Enhanced) │ └─────────────────────────────────────────────────────────────────────────────┘
text┌──────────────────────────────────────────────────────────────────────────────┐ │ RAG Workflow │ ├──────────────────────────────────────────────────────────────────────────────┤ │ Step 1: Data Ingestion & Preparation │ │ ├── Fetch failed test cases from SQL Server │ │ ├── Check if embeddings exist for current exceptions │ │ └── Generate embeddings for new exception messages │ ├──────────────────────────────────────────────────────────────────────────────┤ │ Step 2: Semantic Retrieval (No Time Limit) │ │ ├── Query ChromaDB with current exception embedding │ │ ├── Retrieve top-K most similar historical cases │ │ ├── Filter by metadata (testcase_id, confidence_score) │ │ └── Rank by semantic similarity + temporal relevance │ ├──────────────────────────────────────────────────────────────────────────────┤ │ Step 3: Context Augmentation │ │ ├── Combine current exception with retrieved similar cases │ │ ├── Include classification patterns from similar failures │ │ ├── Add temporal context (recent vs old patterns) │ │ └── Optimize context for model token limits │ ├──────────────────────────────────────────────────────────────────────────────┤ │ Step 4: Enhanced Classification │ │ ├── Send enriched context to Ollama Gemma3 │ │ ├── Receive classification + confidence + reasoning │ │ ├── Post-process results with similarity-based confidence boost │ │ └── Update both SQL Server and ChromaDB with results │ └──────────────────────────────────────────────────────────────────────────────┘
<PackageReference Include="System.Numerics.Vectors" Version="4.5.0" />
<PackageReference Include="Microsoft.ML.OnnxRuntime" Version="1.16.0" />
<PackageReference Include="Newtonsoft.Json" Version="13.0.3" />
sql-- Enhanced TestResults table ALTER TABLE TestResults ADD COLUMN EmbeddingGenerated BIT DEFAULT 0; ALTER TABLE TestResults ADD COLUMN VectorId NVARCHAR(100); -- ChromaDB reference ALTER TABLE TestResults ADD COLUMN SimilarityScore FLOAT; ALTER TABLE TestResults ADD COLUMN RetrievedContextCount INT; -- Performance tracking table CREATE TABLE ProcessingMetrics ( Id INT IDENTITY(1,1) PRIMARY KEY, TestCaseId NVARCHAR(255), ProcessingStartTime DATETIME2, ProcessingEndTime DATETIME2, EmbeddingGenerationTime INT, -- milliseconds SemanticSearchTime INT, ClassificationTime INT, TotalProcessingTime INT, RetrievedSimilarCases INT, ConfidenceScore FLOAT, CreatedDate DATETIME2 DEFAULT GETDATE() );
textTestLogAnalyzer/ ├── src/ │ ├── Core/ │ │ ├── Models/ │ │ │ ├── TestResult.cs (Enhanced) │ │ │ ├── EmbeddingModel.cs (NEW) │ │ │ ├── SimilarityResult.cs (NEW) │ │ │ ├── RAGContext.cs (NEW) │ │ │ └── ProcessingMetrics.cs (NEW) │ │ ├── Services/ │ │ │ ├── DatabaseService.cs (Enhanced) │ │ │ ├── ChromaService.cs (NEW) │ │ │ ├── EmbeddingService.cs (NEW) │ │ │ ├── RAGService.cs (NEW) │ │ │ ├── OllamaService.cs (Enhanced) │ │ │ └── TestLogAnalyzerService.cs (Enhanced) │ │ └── Interfaces/ │ │ ├── IChromaService.cs (NEW) │ │ ├── IEmbeddingService.cs (NEW) │ │ └── IRAGService.cs (NEW) │ ├── Infrastructure/ │ │ ├── Configuration/ │ │ │ └── RAGConfiguration.cs (NEW) │ │ └── Utils/ │ │ ├── VectorOperations.cs (NEW) │ │ └── PerformanceTracker.cs (NEW) ├── vector-db/ │ ├── chroma-data/ (ChromaDB storage) │ └── embeddings-cache/ (Backup storage) ├── config/ │ └── appsettings.json (Enhanced) └── Program.cs (Enhanced)
json{ "ConnectionStrings": { "DefaultConnection": "Server=XYZ;Database=TestResults;Trusted_Connection=true;TrustServerCertificate=true;" }, "Ollama": { "Url": "http://localhost:11434", "ClassificationModel": "gemma3:latest", "EmbeddingModel": "nomic-embed-text" }, "ChromaDB": { "Url": "http://localhost:8000", "Collections": { "TestExceptions": "test_exceptions", "HistoricalPatterns": "historical_patterns" } }, "RAG": { "EmbeddingDimension": 384, "MaxSimilarCases": 10, "SimilarityThreshold": 0.7, "ContextWindowSize": 3000, "BatchSize": 50, "EnablePerformanceTracking": true }, "Performance": { "ParallelProcessing": true, "MaxConcurrentTasks": 5, "EmbeddingCacheSize": 10000, "SearchTimeoutMs": 5000 } }
This RAG implementation will remove the 20-day limitation and provide semantic search across your entire historical dataset, significantly improving both accuracy and processing efficiency.