Skip to content

Commit 9fe5cc2

Browse files
committed
feat: support configuring chat context in AI assistant
This is important for reports with many apps or many checks, where the model's context limits can easily be hit.
1 parent 3865a74 commit 9fe5cc2

File tree

10 files changed

+562
-321
lines changed

10 files changed

+562
-321
lines changed

report-app/report-server.ts

Lines changed: 32 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,14 @@ import {fileURLToPath} from 'node:url';
1010
import {chatWithReportAI} from '../runner/reporting/report-ai-chat';
1111
import {convertV2ReportToV3Report} from '../runner/reporting/migrations/v2_to_v3';
1212
import {FetchedLocalReports, fetchReportsFromDisk} from '../runner/reporting/report-local-disk';
13-
import {AiChatRequest, AIConfigState, RunInfo} from '../runner/shared-interfaces';
13+
import {
14+
AiChatRequest,
15+
AIConfigState,
16+
AssessmentResultFromReportServer,
17+
IndividualAssessmentState,
18+
RunInfo,
19+
RunInfoFromReportServer,
20+
} from '../runner/shared-interfaces';
1421

1522
// This will result in a lot of loading and would slow down the serving,
1623
// so it's loaded lazily below.
@@ -41,7 +48,7 @@ app.get('/api/reports', async (_, res) => {
4148
res.json(results);
4249
});
4350

44-
async function fetchAndMigrateReports(id: string): Promise<RunInfo[] | null> {
51+
async function fetchAndMigrateReports(id: string): Promise<RunInfoFromReportServer[] | null> {
4552
const localData = await resolveLocalData(options.reportsRoot);
4653
let result: RunInfo[] | null = null;
4754

@@ -55,8 +62,23 @@ async function fetchAndMigrateReports(id: string): Promise<RunInfo[] | null> {
5562
return null;
5663
}
5764

58-
// Convert potential older v2 reports.
59-
return result.map(r => convertV2ReportToV3Report(r));
65+
let checkID = 0;
66+
return result.map(run => {
67+
const newRun = {
68+
// Convert potential older v2 reports.
69+
...convertV2ReportToV3Report(run),
70+
// Augment the `RunInfo` to include IDs for individual apps.
71+
// This is useful for the AI chat and context filtering.
72+
results: run.results.map(
73+
check =>
74+
({
75+
id: `${id}-${checkID++}`,
76+
...check,
77+
}) satisfies AssessmentResultFromReportServer,
78+
),
79+
};
80+
return newRun satisfies RunInfoFromReportServer;
81+
});
6082
}
6183

6284
// Endpoint for fetching a specific report group.
@@ -89,16 +111,19 @@ app.post('/api/reports/:id/chat', async (req, res) => {
89111
}
90112

91113
try {
92-
const {prompt, pastMessages, model} = req.body as AiChatRequest;
93-
const assessments = reports.flatMap(run => run.results);
114+
const {prompt, pastMessages, model, contextFilters, openAppIDs} = req.body as AiChatRequest;
115+
const allAssessments = reports.flatMap(run => run.results);
116+
94117
const abortController = new AbortController();
95118
const summary = await chatWithReportAI(
96119
await (llm ?? getOrCreateGenkitLlmRunner()),
97120
prompt,
98121
abortController.signal,
99-
assessments,
122+
allAssessments,
100123
pastMessages,
101124
model,
125+
contextFilters,
126+
openAppIDs,
102127
);
103128
res.json(summary);
104129
} catch (e) {

0 commit comments

Comments (0)