Add new LLM connection modes (codex_oauth, openai_api, local_ollama).
@@ -24,6 +24,12 @@ UDM_VERIFY_TLS=false
 OPENAI_API_KEY=
 OPENAI_BASE_URL=https://api.openai.com/v1
 OPENAI_MODEL=gpt-5.3-codex
+LLM_PROVIDER=codex_oauth
+CODEX_CLI_PATH=codex
+LOCAL_LLM_BASE_URL=http://localhost:11434
+LOCAL_LLM_API_KEY=
+LOCAL_LLM_MODELS_PATH=/api/tags
+LOCAL_LLM_CHAT_PATH=/api/chat

 ENCRYPTION_KEY_PATH=/run/secrets/unfi_encryption_key
 JWT_SECRET=replace-with-strong-secret
README.md
@@ -20,19 +20,50 @@ LAN-only TypeScript monorepo for:
 node -e "console.log(require('crypto').randomBytes(32).toString('base64'))" > secrets/unfi_encryption_key
 ```
+3. Set `ENCRYPTION_KEY_PATH` in `.env` to that file path.
-3. Install dependencies:
+4. Install dependencies:
 ```bash
 npm install
 ```
-4. Start with Docker:
+5. For ChatGPT Codex login mode, verify local CLI auth:
+```bash
+codex login status
+```
+Expected output: `Logged in using ChatGPT`
+6. Start with Docker:
 ```bash
 docker compose up --build
 ```
-5. Or start services locally:
+7. Or start services locally:
 ```bash
 npm run dev
 ```

+## LLM modes
+
+- `LLM_PROVIDER=codex_oauth` (default): uses the local `codex` CLI and your ChatGPT login session, not API-key billing.
+- `LLM_PROVIDER=openai_api`: uses `OPENAI_API_KEY` and metered `/v1/chat/completions` API calls.
+- `LLM_PROVIDER=local_ollama`: uses local or OpenWebUI-served Ollama-style endpoints for open-source models.
+
+Environment knobs:
+
+- `OPENAI_MODEL=gpt-5.3-codex`
+- `CODEX_CLI_PATH=codex`
+- `OPENAI_API_KEY`: only required in `openai_api` mode.
+- `LOCAL_LLM_BASE_URL`: defaults to `http://localhost:11434`
+- `LOCAL_LLM_MODELS_PATH`: defaults to `/api/tags`
+- `LOCAL_LLM_CHAT_PATH`: defaults to `/api/chat`
+- `LOCAL_LLM_API_KEY`: optional bearer token for protected local gateways
+
+Important:
+
+- In `codex_oauth` mode, the API process must be able to execute `codex` and access your Codex login session.
+- If you run via Docker, either install/mount the Codex CLI and credentials into the API container or run the API locally.
+
+OpenWebUI + Ollama example:
+
+- Set `LLM_PROVIDER=local_ollama`
+- Set `OPENAI_MODEL=qwen3-coder-next`
+- Set `LOCAL_LLM_BASE_URL=http://YOUR_OPENWEBUI_HOST:8080`
+- Set `LOCAL_LLM_MODELS_PATH=/ollama/api/tags`
+- Set `LOCAL_LLM_CHAT_PATH=/ollama/api/chat`
+- If your OpenWebUI endpoint is protected, set `LOCAL_LLM_API_KEY`
+
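With those knobs the OpenWebUI/Ollama path reduces to two HTTP calls: list the served models, then request a JSON chat reply. A minimal sketch (assuming Node 18+ global `fetch`; the host and model are the example values above):

```ts
// Sketch: check the served models, then ask for a JSON-formatted chat reply.
const base = "http://YOUR_OPENWEBUI_HOST:8080";
const headers: Record<string, string> = { "content-type": "application/json" };
if (process.env.LOCAL_LLM_API_KEY) {
  headers.authorization = `Bearer ${process.env.LOCAL_LLM_API_KEY}`;
}

const tags = await fetch(`${base}/ollama/api/tags`, { headers });
const { models } = (await tags.json()) as { models?: Array<{ name?: string }> };
if (!models?.some((entry) => entry.name === "qwen3-coder-next")) {
  throw new Error("qwen3-coder-next is not served");
}

const chat = await fetch(`${base}/ollama/api/chat`, {
  method: "POST",
  headers,
  body: JSON.stringify({
    model: "qwen3-coder-next",
    stream: false,
    format: "json",
    messages: [{ role: "user", content: "Reply with {\"ok\": true}" }]
  })
});
const payload = (await chat.json()) as { message?: { content?: string } };
console.log(payload.message?.content);
```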
 ## First-run auth flow
 1. Open web UI at `http://localhost:5173`.
 2. Use **Bootstrap owner** once to create the first local account and get TOTP URI.
@@ -40,7 +71,7 @@ LAN-only TypeScript monorepo for:
 4. Sign in with username/password/TOTP code.

 ## Security defaults
-- AI/remediation features are blocked if `gpt-5.3-codex` is unavailable.
+- AI/remediation features are blocked if the configured model/provider is unavailable.
 - Queue execution is manual-trigger only (`POST /api/v1/remediation/queue/apply`).
 - Low-risk non-disruptive actions only.
 - Mandatory backup and rollback attempt for each execution.
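Because queue execution is manual-trigger only, applying the queue is always an explicit authenticated call. A sketch of that trigger (API port and bearer-token auth scheme are assumptions, not shown in this commit):

```ts
// Sketch: manually trigger remediation queue execution.
const res = await fetch("http://localhost:3000/api/v1/remediation/queue/apply", {
  method: "POST",
  headers: {
    "content-type": "application/json",
    authorization: `Bearer ${process.env.SESSION_TOKEN ?? ""}` // hypothetical token variable
  }
});
if (!res.ok) {
  throw new Error(`Apply failed: HTTP ${res.status}`);
}
console.log(await res.json());
```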
@@ -17,9 +17,15 @@ const envSchema = z.object({
     .string()
     .default("true")
     .transform((value) => value.toLowerCase() === "true"),
+  LLM_PROVIDER: z.enum(["codex_oauth", "openai_api", "local_ollama"]).default("codex_oauth"),
   OPENAI_API_KEY: z.string().optional().default(""),
   OPENAI_BASE_URL: z.string().url().default("https://api.openai.com/v1"),
   OPENAI_MODEL: z.string().default("gpt-5.3-codex"),
+  CODEX_CLI_PATH: z.string().default("codex"),
+  LOCAL_LLM_BASE_URL: z.string().url().default("http://localhost:11434"),
+  LOCAL_LLM_API_KEY: z.string().optional().default(""),
+  LOCAL_LLM_MODELS_PATH: z.string().default("/api/tags"),
+  LOCAL_LLM_CHAT_PATH: z.string().default("/api/chat"),
   ENCRYPTION_KEY_PATH: z.string().min(1),
   JWT_SECRET: z.string().min(16),
   MFA_ISSUER: z.string().default("UNFI-Security-Copilot"),
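At startup this schema turns raw `process.env` strings into a typed config object. A sketch of the parse step (using `envSchema` as defined above; the error handling is illustrative):

```ts
// Sketch: fail fast on an invalid environment, then use the typed result everywhere.
const parsed = envSchema.safeParse(process.env);
if (!parsed.success) {
  console.error("Invalid environment:", parsed.error.flatten().fieldErrors);
  process.exit(1);
}
const config = parsed.data; // e.g. config.LLM_PROVIDER: "codex_oauth" | "openai_api" | "local_ollama"
```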
@@ -64,7 +64,17 @@ const unifiAdapter = new UnifiHttpAdapter(async () => {
 const logIngestor = new LogIngestor(repository, unifiAdapter);
 await logIngestor.startSyslogListener(5514);

-const codexClient = new CodexClient(config.OPENAI_API_KEY, config.OPENAI_BASE_URL, config.OPENAI_MODEL);
+const codexClient = new CodexClient({
+  provider: config.LLM_PROVIDER,
+  model: config.OPENAI_MODEL,
+  openAiApiKey: config.OPENAI_API_KEY,
+  openAiBaseUrl: config.OPENAI_BASE_URL,
+  codexCliPath: config.CODEX_CLI_PATH,
+  localBaseUrl: config.LOCAL_LLM_BASE_URL,
+  localApiKey: config.LOCAL_LLM_API_KEY,
+  localModelsPath: config.LOCAL_LLM_MODELS_PATH,
+  localChatPath: config.LOCAL_LLM_CHAT_PATH
+});
 const policyEngine = new PolicyEngine();
 const dependencyService = new DependencyService(unifiAdapter, codexClient);
 const alertService = new AlertService(repository, {
@@ -5,6 +5,8 @@ import type { UnifiAdapter } from "./unifiAdapter.js";
 export interface DependencyHealth {
   checkedAt: string;
   modelGateReady: boolean;
+  llmProvider: string;
+  llmModel: string;
   udm: CapabilityReport;
   issues: string[];
 }
@@ -21,7 +23,7 @@ export class DependencyService {

     let modelGateReady = false;
     try {
-      await this.codexClient.assertModelAvailable("gpt-5.3-codex");
+      await this.codexClient.assertModelAvailable(this.codexClient.getConfiguredModel());
       modelGateReady = true;
     } catch (error) {
       issues.push((error as Error).message);
@@ -30,6 +32,8 @@ export class DependencyService {
     return {
       checkedAt: new Date().toISOString(),
       modelGateReady,
+      llmProvider: this.codexClient.getProvider(),
+      llmModel: this.codexClient.getConfiguredModel(),
       udm: {
         ...udm,
         modelGateReady
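Consumers can gate AI features on this report. A hypothetical sketch (the `checkHealth` method name is assumed; only the `DependencyHealth` shape above is from this commit):

```ts
// Sketch: refuse AI/remediation work while the model gate is down.
async function guardAiFeatures(dependencyService: DependencyService): Promise<void> {
  const health = await dependencyService.checkHealth(); // assumed method returning DependencyHealth
  if (!health.modelGateReady) {
    throw new Error(
      `AI features disabled (${health.llmProvider}/${health.llmModel}): ${health.issues.join("; ")}`
    );
  }
}
```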
@@ -1,81 +1,105 @@
+import { mkdtemp, readFile, rm, writeFile } from "node:fs/promises";
+import { tmpdir } from "node:os";
+import { join } from "node:path";
+import { spawn } from "node:child_process";
 import { v4 as uuid } from "uuid";
 import { recommendationSetSchema, type Recommendation, type RecommendationSet, type SecurityAnalysisInput } from "@unfi/contracts";

 const SECRET_KEY_PATTERN = /(password|secret|token|cookie|api[_-]?key|private[_-]?key)/i;

+type LlmProvider = "codex_oauth" | "openai_api" | "local_ollama";
+
+interface CommandResult {
+  stdout: string;
+  stderr: string;
+  exitCode: number;
+}
+
+type CommandRunner = (command: string, args: string[], stdin?: string) => Promise<CommandResult>;
+
+export interface CodexClientOptions {
+  provider: LlmProvider;
+  model: string;
+  openAiApiKey: string;
+  openAiBaseUrl: string;
+  codexCliPath: string;
+  localBaseUrl: string;
+  localApiKey: string;
+  localModelsPath: string;
+  localChatPath: string;
+  commandRunner?: CommandRunner;
+  fetchFn?: typeof fetch;
+}
 export class CodexClient {
   private availableModels = new Set<string>();
   private modelsCheckedAt = 0;
+  private codexLoginCheckedAt = 0;
+  private codexLoginLooksGood = false;
+  private readonly provider: LlmProvider;
+  private readonly model: string;
+  private readonly openAiApiKey: string;
+  private readonly openAiBaseUrl: string;
+  private readonly codexCliPath: string;
+  private readonly localBaseUrl: string;
+  private readonly localApiKey: string;
+  private readonly localModelsPath: string;
+  private readonly localChatPath: string;
+  private readonly commandRunner: CommandRunner;
+  private readonly fetchFn: typeof fetch;

-  constructor(
-    private readonly apiKey: string,
-    private readonly baseUrl: string,
-    private readonly model: string
-  ) {}
+  constructor(options: CodexClientOptions) {
+    this.provider = options.provider;
+    this.model = options.model;
+    this.openAiApiKey = options.openAiApiKey;
+    this.openAiBaseUrl = options.openAiBaseUrl;
+    this.codexCliPath = options.codexCliPath;
+    this.localBaseUrl = trimTrailingSlash(options.localBaseUrl);
+    this.localApiKey = options.localApiKey;
+    this.localModelsPath = options.localModelsPath;
+    this.localChatPath = options.localChatPath;
+    this.commandRunner = options.commandRunner ?? runCommand;
+    this.fetchFn = options.fetchFn ?? fetch;
+  }
-  async assertModelAvailable(requiredModel: "gpt-5.3-codex"): Promise<void> {
-    if (!this.apiKey) {
-      throw new Error("OPENAI_API_KEY is not configured");
+  getConfiguredModel(): string {
+    return this.model;
+  }
+
+  getProvider(): LlmProvider {
+    return this.provider;
+  }
+
+  async assertModelAvailable(requiredModel: string = this.model): Promise<void> {
+    if (this.model !== requiredModel) {
+      throw new Error(`Configured model '${this.model}' does not match required model '${requiredModel}'`);
     }

-    const cacheTtlMs = 5 * 60 * 1000;
-    if (Date.now() - this.modelsCheckedAt > cacheTtlMs) {
-      await this.refreshModels();
+    if (this.provider === "openai_api") {
+      await this.assertOpenAiModelAvailable(requiredModel);
+      return;
     }

-    if (!this.availableModels.has(requiredModel)) {
-      throw new Error(`Required model '${requiredModel}' is not available for this API key`);
+    if (this.provider === "local_ollama") {
+      await this.assertLocalModelAvailable(requiredModel);
+      return;
     }
+
+    await this.assertCodexOAuthReady(requiredModel);
   }
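The assertion is cheap to call before every AI request because each provider path caches its check (OpenAI model listings for 5 minutes, local tags for 2, CLI login for 1). A usage sketch against the `codexClient` instance wired up in the earlier hunk:

```ts
// Sketch: gate an AI request on model availability, surfacing the reason on failure.
try {
  await codexClient.assertModelAvailable();
} catch (error) {
  console.warn("AI gate closed:", (error as Error).message);
}
```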
   async recommend(input: SecurityAnalysisInput, fallbackRecommendations: Recommendation[]): Promise<RecommendationSet> {
-    await this.assertModelAvailable("gpt-5.3-codex");
+    await this.assertModelAvailable(this.model);
     const sanitized = sanitizeForModel(input) as SecurityAnalysisInput;

-    const systemPrompt =
-      "You are a network security copilot. Return JSON with shape {generatedAt, model, recommendations[]} only.";
-    const userPrompt = JSON.stringify({
-      task: "Recommend low-risk, reversible, non-disruptive hardening actions",
-      constraints: [
-        "Only include actions with riskLevel=low",
-        "Never include disruptive=true",
-        "Prefer explicit endpoint+body payloads for UniFi Network API"
-      ],
-      analysis: sanitized
-    });
-
     try {
-      const response = await fetch(`${this.baseUrl}/chat/completions`, {
-        method: "POST",
-        headers: {
-          "content-type": "application/json",
-          authorization: `Bearer ${this.apiKey}`
-        },
-        body: JSON.stringify({
-          model: this.model,
-          messages: [
-            { role: "system", content: systemPrompt },
-            { role: "user", content: userPrompt }
-          ],
-          response_format: { type: "json_object" },
-          temperature: 0.1
-        })
-      });
-
-      if (!response.ok) {
-        throw new Error(`OpenAI request failed with HTTP ${response.status}`);
+      if (this.provider === "openai_api") {
+        return await this.recommendViaOpenAiApi(sanitized);
       }

-      const payload = (await response.json()) as {
-        choices?: Array<{ message?: { content?: string | null } }>;
-      };
-      const text = payload.choices?.[0]?.message?.content;
-      if (!text) {
-        throw new Error("OpenAI response did not include content");
+      if (this.provider === "local_ollama") {
+        return await this.recommendViaLocalOllama(sanitized);
       }
-      const parsed = JSON.parse(text) as unknown;
-      const normalized = recommendationSetSchema.parse(parsed);
-      return normalized;
+      return await this.recommendViaCodexOAuth(sanitized);
     } catch {
       return {
         generatedAt: new Date().toISOString(),
@@ -90,10 +114,184 @@ export class CodexClient {
     }
   }

-  private async refreshModels(): Promise<void> {
-    const response = await fetch(`${this.baseUrl}/models`, {
+  private async assertOpenAiModelAvailable(requiredModel: string): Promise<void> {
+    if (!this.openAiApiKey) {
+      throw new Error("OPENAI_API_KEY is not configured for openai_api mode");
+    }
+
+    const cacheTtlMs = 5 * 60 * 1000;
+    if (Date.now() - this.modelsCheckedAt > cacheTtlMs) {
+      await this.refreshOpenAiModels();
+    }
+
+    if (!this.availableModels.has(requiredModel)) {
+      throw new Error(`Required model '${requiredModel}' is not available for this API key`);
+    }
+  }
+
+  private async assertLocalModelAvailable(requiredModel: string): Promise<void> {
+    const cacheTtlMs = 2 * 60 * 1000;
+    if (Date.now() - this.modelsCheckedAt > cacheTtlMs) {
+      await this.refreshLocalModels();
+    }
+    if (!this.availableModels.has(requiredModel)) {
+      throw new Error(
+        `Local model '${requiredModel}' not found at ${this.localBaseUrl}${this.localModelsPath}. ` +
+          `Update OPENAI_MODEL or LOCAL_LLM_MODELS_PATH.`
+      );
+    }
+  }
+  private async assertCodexOAuthReady(requiredModel: string): Promise<void> {
+    const cacheTtlMs = 60 * 1000;
+    if (this.codexLoginLooksGood && Date.now() - this.codexLoginCheckedAt < cacheTtlMs) {
+      return;
+    }
+
+    let status: CommandResult;
+    try {
+      status = await this.commandRunner(this.codexCliPath, ["login", "status"]);
+    } catch (error) {
+      throw new Error(`Codex CLI was not executable at '${this.codexCliPath}': ${(error as Error).message}`);
+    }
+    if (status.exitCode !== 0) {
+      throw new Error(`Codex login status failed: ${status.stderr || status.stdout}`.trim());
+    }
+
+    const combined = `${status.stdout}\n${status.stderr}`.toLowerCase();
+    if (!combined.includes("logged in")) {
+      throw new Error("Codex CLI is not logged in");
+    }
+    if (!combined.includes("chatgpt")) {
+      throw new Error("Codex CLI is not using ChatGPT login. Run `codex login` (without --with-api-key).");
+    }
+    if (!requiredModel.toLowerCase().includes("codex")) {
+      throw new Error(`codex_oauth mode requires a Codex model. Configured model: '${requiredModel}'`);
+    }
+
+    this.codexLoginLooksGood = true;
+    this.codexLoginCheckedAt = Date.now();
+  }
+  private async recommendViaOpenAiApi(input: SecurityAnalysisInput): Promise<RecommendationSet> {
+    const systemPrompt =
+      "You are a network security copilot. Return JSON with shape {generatedAt, model, recommendations[]} only.";
+    const userPrompt = JSON.stringify({
+      task: "Recommend low-risk, reversible, non-disruptive hardening actions",
+      constraints: [
+        "Only include actions with riskLevel=low",
+        "Never include disruptive=true",
+        "Prefer explicit endpoint+body payloads for UniFi Network API"
+      ],
+      analysis: input
+    });
+
+    const response = await this.fetchFn(`${this.openAiBaseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "content-type": "application/json",
+        authorization: `Bearer ${this.openAiApiKey}`
+      },
+      body: JSON.stringify({
+        model: this.model,
+        messages: [
+          { role: "system", content: systemPrompt },
+          { role: "user", content: userPrompt }
+        ],
+        response_format: { type: "json_object" },
+        temperature: 0.1
+      })
+    });
+
+    if (!response.ok) {
+      throw new Error(`OpenAI request failed with HTTP ${response.status}`);
+    }
+
+    const payload = (await response.json()) as Record<string, unknown>;
+    const text = extractModelResponseText(payload);
+    if (!text) {
+      throw new Error("OpenAI response did not include content");
+    }
+    return parseRecommendationJson(text);
+  }
+  private async recommendViaLocalOllama(input: SecurityAnalysisInput): Promise<RecommendationSet> {
+    const systemPrompt =
+      "You are a network security copilot. Return JSON with shape {generatedAt, model, recommendations[]} only.";
+    const userPrompt = JSON.stringify({
+      task: "Recommend low-risk, reversible, non-disruptive hardening actions",
+      constraints: [
+        "Only include actions with riskLevel=low",
+        "Never include disruptive=true",
+        "Prefer explicit endpoint+body payloads for UniFi Network API"
+      ],
+      analysis: input
+    });
+
+    const response = await this.fetchFn(`${this.localBaseUrl}${this.localChatPath}`, {
+      method: "POST",
+      headers: this.modelHeaders(),
+      body: JSON.stringify({
+        model: this.model,
+        stream: false,
+        format: "json",
+        messages: [
+          { role: "system", content: systemPrompt },
+          { role: "user", content: userPrompt }
+        ]
+      })
+    });
+    if (!response.ok) {
+      throw new Error(`Local LLM request failed with HTTP ${response.status}`);
+    }
+
+    const payload = (await response.json()) as Record<string, unknown>;
+    const text = extractModelResponseText(payload);
+    if (!text) {
+      throw new Error("Local LLM response did not include content");
+    }
+    return parseRecommendationJson(text);
+  }
+  private async recommendViaCodexOAuth(input: SecurityAnalysisInput): Promise<RecommendationSet> {
+    const sandboxDir = await mkdtemp(join(tmpdir(), "unfi-codex-"));
+    const schemaPath = join(sandboxDir, "recommendation-schema.json");
+    const outputPath = join(sandboxDir, "recommendation-output.json");
+    try {
+      await writeFile(schemaPath, JSON.stringify(RECOMMENDATION_OUTPUT_SCHEMA), "utf8");
+      const prompt = buildCodexPrompt(input);
+
+      const command = await this.commandRunner(
+        this.codexCliPath,
+        [
+          "exec",
+          "-m",
+          this.model,
+          "--skip-git-repo-check",
+          "--output-schema",
+          schemaPath,
+          "--output-last-message",
+          outputPath,
+          "-"
+        ],
+        prompt
+      );
+
+      if (command.exitCode !== 0) {
+        throw new Error(`Codex exec failed: ${command.stderr || command.stdout}`.trim());
+      }
+
+      const raw = await readFile(outputPath, "utf8");
+      return parseRecommendationJson(raw);
+    } finally {
+      await rm(sandboxDir, { recursive: true, force: true });
+    }
+  }
+  private async refreshOpenAiModels(): Promise<void> {
+    const response = await this.fetchFn(`${this.openAiBaseUrl}/models`, {
+      headers: {
+        authorization: `Bearer ${this.openAiApiKey}`
+      }
+    });
+    if (!response.ok) {
@@ -105,6 +303,165 @@ export class CodexClient {
     this.availableModels = new Set(ids);
     this.modelsCheckedAt = Date.now();
   }
+  private async refreshLocalModels(): Promise<void> {
+    const response = await this.fetchFn(`${this.localBaseUrl}${this.localModelsPath}`, {
+      headers: this.modelHeaders()
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to list local models: HTTP ${response.status}`);
+    }
+
+    const payload = (await response.json()) as {
+      models?: Array<{ name?: string; model?: string }>;
+      data?: Array<{ id?: string }>;
+    };
+
+    const fromModels = (payload.models ?? []).flatMap((entry) => {
+      if (typeof entry.name === "string") {
+        return [entry.name];
+      }
+      if (typeof entry.model === "string") {
+        return [entry.model];
+      }
+      return [];
+    });
+    const fromData = (payload.data ?? []).flatMap((entry) => (typeof entry.id === "string" ? [entry.id] : []));
+    this.availableModels = new Set([...fromModels, ...fromData]);
+    this.modelsCheckedAt = Date.now();
+  }
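`refreshLocalModels` accepts both Ollama-style and OpenAI-compatible listings, which is why it probes two fields. Illustrative payloads (example values, not from the commit):

```ts
// Ollama /api/tags shape: names under `models[].name` (or `models[].model`).
const ollamaTags = { models: [{ name: "qwen3-coder-next" }] };
// OpenAI-compatible /v1/models shape: ids under `data[].id`.
const openAiList = { data: [{ id: "qwen3-coder-next" }] };
// Either shape ends up in the same `availableModels` set.
```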
+  private modelHeaders(): Record<string, string> {
+    if (!this.localApiKey) {
+      return {
+        "content-type": "application/json"
+      };
+    }
+    return {
+      "content-type": "application/json",
+      authorization: `Bearer ${this.localApiKey}`
+    };
+  }
 }
+function buildCodexPrompt(input: SecurityAnalysisInput): string {
+  return [
+    "You are a network security copilot.",
+    "Return only JSON matching the provided output schema.",
+    "Requirements:",
+    "- Generate low-risk, reversible, non-disruptive actions only.",
+    "- Set recommendations[].source to 'ai'.",
+    "- Keep actions specific to UniFi-compatible payloads.",
+    "",
+    "Analysis input JSON:",
+    JSON.stringify(
+      {
+        task: "Recommend low-risk, reversible, non-disruptive hardening actions",
+        constraints: [
+          "Only include actions with riskLevel=low",
+          "Never include disruptive=true",
+          "Prefer explicit endpoint+body payloads for UniFi Network API"
+        ],
+        analysis: input
+      },
+      null,
+      2
+    )
+  ].join("\n");
+}
+function parseRecommendationJson(text: string): RecommendationSet {
+  const stripped = stripMarkdownCodeFence(text.trim());
+  const parsed = JSON.parse(stripped) as unknown;
+  return recommendationSetSchema.parse(parsed);
+}
+function stripMarkdownCodeFence(text: string): string {
+  if (!text.startsWith("```")) {
+    return text;
+  }
+  const lines = text.split(/\r?\n/);
+  if (lines.length < 3) {
+    return text;
+  }
+  const firstLine = lines[0];
+  const lastLine = lines.at(-1);
+  if (!firstLine?.startsWith("```") || !lastLine?.startsWith("```")) {
+    return text;
+  }
+  return lines.slice(1, -1).join("\n");
+}
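Models often wrap JSON in a markdown fence even when told not to; `parseRecommendationJson` strips one before validating. A quick same-module illustration (the fenced string is a made-up example):

```ts
const fenced = "```json\n{\"generatedAt\":\"2024-01-01T00:00:00Z\",\"model\":\"gpt-5.3-codex\",\"recommendations\":[]}\n```";
// stripMarkdownCodeFence drops the first and last fence lines, leaving raw JSON
// that recommendationSetSchema.parse() can validate.
const set = parseRecommendationJson(fenced);
console.log(set.recommendations.length); // 0
```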
+async function runCommand(command: string, args: string[], stdin?: string): Promise<CommandResult> {
+  return new Promise<CommandResult>((resolve, reject) => {
+    const child = spawn(command, args, {
+      stdio: ["pipe", "pipe", "pipe"]
+    });
+
+    let stdout = "";
+    let stderr = "";
+    child.stdout.setEncoding("utf8");
+    child.stderr.setEncoding("utf8");
+    child.stdout.on("data", (chunk: string) => {
+      stdout += chunk;
+    });
+    child.stderr.on("data", (chunk: string) => {
+      stderr += chunk;
+    });
+    child.on("error", (error) => {
+      reject(error);
+    });
+    child.on("close", (code) => {
+      resolve({
+        stdout,
+        stderr,
+        exitCode: code ?? -1
+      });
+    });
+
+    if (stdin) {
+      child.stdin.write(stdin);
+    }
+    child.stdin.end();
+  });
+}
+function extractModelResponseText(payload: Record<string, unknown>): string | null {
+  const message = payload.message;
+  if (typeof message === "object" && message !== null) {
+    const content = (message as { content?: unknown }).content;
+    if (typeof content === "string" && content.trim()) {
+      return content;
+    }
+  }
+
+  const choices = payload.choices;
+  if (Array.isArray(choices) && choices.length > 0) {
+    const first = choices[0];
+    if (typeof first === "object" && first !== null) {
+      const msg = (first as { message?: unknown }).message;
+      if (typeof msg === "object" && msg !== null) {
+        const content = (msg as { content?: unknown }).content;
+        if (typeof content === "string" && content.trim()) {
+          return content;
+        }
+      }
+    }
+  }
+
+  const response = payload.response;
+  if (typeof response === "string" && response.trim()) {
+    return response;
+  }
+
+  return null;
+}
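`extractModelResponseText` probes three response shapes in order, so one helper serves every provider. Example inputs it handles (same-module illustration; values are made up):

```ts
// Ollama /api/chat: { message: { content } }
extractModelResponseText({ message: { content: "{}" } });                // => "{}"
// OpenAI chat completions: { choices: [{ message: { content } }] }
extractModelResponseText({ choices: [{ message: { content: "{}" } }] }); // => "{}"
// Raw `response` string, as Ollama's generate-style replies use.
extractModelResponseText({ response: "{}" });                            // => "{}"
extractModelResponseText({});                                            // => null
```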
+function trimTrailingSlash(value: string): string {
+  if (value.endsWith("/")) {
+    return value.slice(0, -1);
+  }
+  return value;
+}
 function sanitizeForModel(value: unknown): unknown {
@@ -125,3 +482,50 @@ function sanitizeForModel(value: unknown): unknown {
   }
   return result;
 }
+const RECOMMENDATION_OUTPUT_SCHEMA = {
+  $schema: "http://json-schema.org/draft-07/schema#",
+  type: "object",
+  required: ["generatedAt", "model", "recommendations"],
+  additionalProperties: false,
+  properties: {
+    generatedAt: { type: "string" },
+    model: { type: "string" },
+    recommendations: {
+      type: "array",
+      items: {
+        type: "object",
+        required: ["id", "source", "title", "rationale", "riskLevel", "controls", "actions", "createdAt"],
+        additionalProperties: true,
+        properties: {
+          id: { type: "string" },
+          source: { type: "string", enum: ["ai", "rule"] },
+          title: { type: "string" },
+          rationale: { type: "string" },
+          riskLevel: { type: "string", enum: ["low", "medium", "high"] },
+          controls: { type: "array", items: { type: "string" } },
+          createdAt: { type: "string" },
+          actions: {
+            type: "array",
+            items: {
+              type: "object",
+              required: ["id", "controlId", "type", "description", "riskLevel", "disruptive", "reversible", "payload"],
+              additionalProperties: true,
+              properties: {
+                id: { type: "string" },
+                controlId: { type: "string" },
+                type: { type: "string" },
+                description: { type: "string" },
+                riskLevel: { type: "string", enum: ["low", "medium", "high"] },
+                disruptive: { type: "boolean" },
+                reversible: { type: "boolean" },
+                payload: { type: "object" },
+                expectedState: { type: "object" }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+} as const;
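This schema is handed to `codex exec --output-schema`, and the same document can be checked locally before trusting a reply. A sketch assuming the `ajv` package (not a dependency shown in this commit):

```ts
import Ajv from "ajv";

// Sketch: validate a model response against the output schema.
const ajv = new Ajv();
const validate = ajv.compile(RECOMMENDATION_OUTPUT_SCHEMA);
const candidate = {
  generatedAt: new Date().toISOString(),
  model: "gpt-5.3-codex",
  recommendations: []
};
if (!validate(candidate)) {
  console.error(validate.errors);
}
```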
apps/api/test/llmOrchestrator.test.ts (new file)
@@ -0,0 +1,182 @@
import { writeFile } from "node:fs/promises";
import { describe, expect, it } from "vitest";
import type { SecurityAnalysisInput } from "@unfi/contracts";
import { CodexClient } from "../src/services/llmOrchestrator.js";

const analysisInput: SecurityAnalysisInput = {
  posture: {
    score: 70,
    evaluatedAt: new Date().toISOString(),
    failedControls: [],
    warningControls: [],
    passedControls: [],
    driftDetected: false,
    driftSummary: []
  },
  recentEvents: [],
  snapshot: {
    id: "snapshot-1",
    capturedAt: new Date().toISOString(),
    firmwareVersion: "9.0.0",
    firmwareChannel: "stable",
    site: "default",
    settings: {
      raw: {}
    },
    hash: "hash"
  },
  policyVersion: "mvp-1"
};
describe("CodexClient OAuth mode", () => {
|
||||
it("uses ChatGPT Codex login path and parses recommendations", async () => {
|
||||
const client = new CodexClient({
|
||||
provider: "codex_oauth",
|
||||
model: "gpt-5.3-codex",
|
||||
openAiApiKey: "",
|
||||
openAiBaseUrl: "https://api.openai.com/v1",
|
||||
codexCliPath: "codex",
|
||||
localBaseUrl: "http://localhost:11434",
|
||||
localApiKey: "",
|
||||
localModelsPath: "/api/tags",
|
||||
localChatPath: "/api/chat",
|
||||
commandRunner: async (_command, args) => {
|
||||
if (args[0] === "login" && args[1] === "status") {
|
||||
return {
|
||||
stdout: "Logged in using ChatGPT",
|
||||
stderr: "",
|
||||
exitCode: 0
|
||||
};
|
||||
}
|
||||
if (args[0] === "exec") {
|
||||
const outputFlagIndex = args.findIndex((arg) => arg === "--output-last-message");
|
||||
const outputPath = args[outputFlagIndex + 1];
|
||||
if (!outputPath) {
|
||||
throw new Error("Missing output path");
|
||||
}
|
||||
await writeFile(
|
||||
outputPath,
|
||||
JSON.stringify({
|
||||
generatedAt: new Date().toISOString(),
|
||||
model: "gpt-5.3-codex",
|
||||
recommendations: [
|
||||
{
|
||||
id: "rec-1",
|
||||
source: "ai",
|
||||
title: "Disable remote management",
|
||||
rationale: "Reduce management plane exposure",
|
||||
riskLevel: "low",
|
||||
controls: ["MGMT-01"],
|
||||
createdAt: new Date().toISOString(),
|
||||
actions: [
|
||||
{
|
||||
id: "act-1",
|
||||
controlId: "MGMT-01",
|
||||
type: "disable_feature",
|
||||
description: "Disable remote access",
|
||||
riskLevel: "low",
|
||||
disruptive: false,
|
||||
reversible: true,
|
||||
payload: {
|
||||
endpoint: "/proxy/network/api/s/default/set/setting/mgmt",
|
||||
method: "POST",
|
||||
body: {
|
||||
remoteAccess: false
|
||||
}
|
||||
},
|
||||
expectedState: {
|
||||
remoteAccess: false
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}),
|
||||
"utf8"
|
||||
);
|
||||
return {
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
exitCode: 0
|
||||
};
|
||||
}
|
||||
throw new Error(`Unexpected command args: ${args.join(" ")}`);
|
||||
}
|
||||
});
|
||||
|
||||
const result = await client.recommend(analysisInput, []);
|
||||
expect(result.model).toBe("gpt-5.3-codex");
|
||||
expect(result.recommendations).toHaveLength(1);
|
||||
expect(result.recommendations.at(0)?.source).toBe("ai");
|
||||
});
|
||||
|
||||
it("rejects Codex login that is not ChatGPT", async () => {
|
||||
const client = new CodexClient({
|
||||
provider: "codex_oauth",
|
||||
model: "gpt-5.3-codex",
|
||||
openAiApiKey: "",
|
||||
openAiBaseUrl: "https://api.openai.com/v1",
|
||||
codexCliPath: "codex",
|
||||
localBaseUrl: "http://localhost:11434",
|
||||
localApiKey: "",
|
||||
localModelsPath: "/api/tags",
|
||||
localChatPath: "/api/chat",
|
||||
commandRunner: async () => ({
|
||||
stdout: "Logged in using API key",
|
||||
stderr: "",
|
||||
exitCode: 0
|
||||
})
|
||||
});
|
||||
|
||||
await expect(client.assertModelAvailable("gpt-5.3-codex")).rejects.toThrow(/ChatGPT login/i);
|
||||
});
|
||||
});
|
||||
|
||||
describe("CodexClient local_ollama mode", () => {
|
||||
it("checks tags and parses chat response", async () => {
|
||||
const client = new CodexClient({
|
||||
provider: "local_ollama",
|
||||
model: "qwen3-coder-next",
|
||||
openAiApiKey: "",
|
||||
openAiBaseUrl: "https://api.openai.com/v1",
|
||||
codexCliPath: "codex",
|
||||
localBaseUrl: "http://localhost:11434",
|
||||
localApiKey: "",
|
||||
localModelsPath: "/api/tags",
|
||||
localChatPath: "/api/chat",
|
||||
fetchFn: async (url, init) => {
|
||||
const asString = String(url);
|
||||
if (asString.endsWith("/api/tags")) {
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
models: [{ name: "qwen3-coder-next" }]
|
||||
}),
|
||||
{ status: 200, headers: { "content-type": "application/json" } }
|
||||
);
|
||||
}
|
||||
if (asString.endsWith("/api/chat")) {
|
||||
const body = JSON.parse(String(init?.body ?? "{}")) as { model?: string };
|
||||
expect(body.model).toBe("qwen3-coder-next");
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
message: {
|
||||
content: JSON.stringify({
|
||||
generatedAt: new Date().toISOString(),
|
||||
model: "qwen3-coder-next",
|
||||
recommendations: []
|
||||
})
|
||||
}
|
||||
}),
|
||||
{ status: 200, headers: { "content-type": "application/json" } }
|
||||
);
|
||||
}
|
||||
return new Response("not found", { status: 404 });
|
||||
}
|
||||
});
|
||||
|
||||
await expect(client.assertModelAvailable("qwen3-coder-next")).resolves.toBeUndefined();
|
||||
const result = await client.recommend(analysisInput, []);
|
||||
expect(result.model).toBe("qwen3-coder-next");
|
||||
expect(result.recommendations).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
@@ -4,6 +4,8 @@ import { api } from "../lib/api";

 interface DependencyHealth {
   modelGateReady: boolean;
+  llmProvider: string;
+  llmModel: string;
   issues: string[];
   udm: {
     controllerReachable: boolean;
@@ -38,6 +40,12 @@ export function PosturePage() {
       {error ? <p className="error-text">{error}</p> : null}
       {deps ? (
         <div className="status-grid">
+          <p>
+            LLM Provider: <strong>{deps.llmProvider}</strong>
+          </p>
+          <p>
+            LLM Model: <strong>{deps.llmModel}</strong>
+          </p>
           <p>
             UDM Reachable: <strong>{deps.udm.controllerReachable ? "yes" : "no"}</strong>
           </p>