feat(3.15): AI Tools extension and integration

Prompt Generator:
- Search bar searching across name/description/tags/category
- Target type filter (text/image) with a quick 'Imagine' (image) toggle
- 4 new image templates: Midjourney Exterior, SD Interior,
  Midjourney Infographic, SD Material Texture (18 total)
- Config v0.2.0

AI Chat - Real API Integration:
- /api/ai-chat route: multi-provider (OpenAI, Anthropic, Ollama, demo)
- Default system prompt in Romanian for the architecture context
- GET: config status, POST: message routing
- use-chat.ts: sendMessage() with a real fetch, sending state,
  providerConfig fetch, updateSession() for project linking
  (see the sketch after this list)
- UI: provider status badge (Wifi/WifiOff), Bot icon on messages,
  loading spinner while generating, input disabled while sending
- Config banner with provider/model/status details
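
Sketch of the client-side call (illustrative; use-chat.ts is not part of this file
view, and the exact function and field names here are assumptions):

  async function sendMessage(
    history: Array<{ role: "user" | "assistant"; content: string }>,
  ): Promise<string> {
    // POST the conversation so far; the route adds the system prompt server-side.
    const res = await fetch("/api/ai-chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ messages: history }),
    });
    const data = await res.json();
    if (!res.ok) {
      throw new Error(data.message ?? data.error ?? "AI request failed");
    }
    return data.content as string; // assistant reply text
  }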

AI Chat + Tag Manager:
- Project selector dropdown in chat header (useTags project)
- Session linking: projectTagId + projectName on ChatSession
- Project name display in session sidebar
- Project context injected into the system prompt (sketch below)
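
One possible shape for that injection (illustrative; the actual use-chat.ts code is not
in this file view and the helper name is an assumption; the result would be sent as the
optional systemPrompt field of POST /api/ai-chat):

  function buildSystemPrompt(basePrompt: string, projectName?: string): string {
    // Append the linked project's name so replies stay scoped to it.
    return projectName
      ? `${basePrompt}\n\nContext proiect: ${projectName}`
      : basePrompt;
  }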

Docker:
- AI env vars: AI_PROVIDER, AI_API_KEY, AI_MODEL, AI_BASE_URL, AI_MAX_TOKENS
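
Example values (illustrative; AI_MODEL is optional and falls back to a per-provider default):

  AI_PROVIDER=anthropic
  AI_API_KEY=<your key>
  AI_MODEL=claude-sonnet-4-20250514
  AI_MAX_TOKENS=2048
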
Author: AI Assistant
Date:   2026-02-28 04:51:36 +02:00
Parent: 11b35c750f
Commit: d34c722167
12 changed files with 1550 additions and 189 deletions

@@ -0,0 +1,254 @@
import { NextRequest, NextResponse } from "next/server";
/**
* AI Chat API Route
*
* Supports multiple providers: OpenAI, Anthropic (Claude), Ollama (local).
* Provider and API key configured via environment variables:
*
* - AI_PROVIDER: 'openai' | 'anthropic' | 'ollama' (default: 'demo')
* - AI_API_KEY: API key for OpenAI or Anthropic
* - AI_MODEL: Model name (default: per provider)
* - AI_BASE_URL: Custom base URL (required for Ollama, optional for others)
* - AI_MAX_TOKENS: Max response tokens (default: 2048)
*/
interface ChatRequestBody {
messages: Array<{
role: "user" | "assistant" | "system";
content: string;
}>;
systemPrompt?: string;
maxTokens?: number;
}
function getConfig() {
return {
provider: (process.env.AI_PROVIDER ?? "demo") as string,
apiKey: process.env.AI_API_KEY ?? "",
model: process.env.AI_MODEL ?? "",
baseUrl: process.env.AI_BASE_URL ?? "",
maxTokens: parseInt(process.env.AI_MAX_TOKENS ?? "2048", 10),
};
}
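// Default system prompt (in Romanian): scopes the assistant to architecture, urbanism,
// and Romanian construction legislation; used when the request provides no systemPrompt.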
const DEFAULT_SYSTEM_PROMPT = `Ești un asistent AI pentru un birou de arhitectură. Răspunzi în limba română.
Ești specializat în:
- Arhitectură și proiectare
- Urbanism și PUZ/PUG/PUD
- Legislația construcțiilor din România (Legea 50/1991, Legea 350/2001)
- Certificat de Urbanism, Autorizație de Construire
- Norme tehnice (P118, normative de proiectare)
- Documentație tehnică (DTAC, PT, memorii)
Răspunde clar, concis și profesional.`;
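/** Calls the OpenAI Chat Completions API (or an OpenAI-compatible endpoint via AI_BASE_URL). */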
async function callOpenAI(
messages: ChatRequestBody["messages"],
systemPrompt: string,
config: ReturnType<typeof getConfig>,
): Promise<string> {
const baseUrl = config.baseUrl || "https://api.openai.com/v1";
const model = config.model || "gpt-4o-mini";
const response = await fetch(`${baseUrl}/chat/completions`, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${config.apiKey}`,
},
body: JSON.stringify({
model,
messages: [{ role: "system", content: systemPrompt }, ...messages],
max_tokens: config.maxTokens,
temperature: 0.7,
}),
});
if (!response.ok) {
const error = await response.text();
throw new Error(`OpenAI API error (${response.status}): ${error}`);
}
const data = (await response.json()) as {
choices: Array<{ message: { content: string } }>;
};
return data.choices[0]?.message?.content ?? "";
}
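/** Calls the Anthropic Messages API; the system prompt is passed via the top-level `system` field. */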
async function callAnthropic(
messages: ChatRequestBody["messages"],
systemPrompt: string,
config: ReturnType<typeof getConfig>,
): Promise<string> {
const model = config.model || "claude-sonnet-4-20250514";
const response = await fetch("https://api.anthropic.com/v1/messages", {
method: "POST",
headers: {
"Content-Type": "application/json",
"x-api-key": config.apiKey,
"anthropic-version": "2023-06-01",
},
body: JSON.stringify({
model,
max_tokens: config.maxTokens,
system: systemPrompt,
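      // Anthropic does not accept a "system" role inside messages, so any stray
      // system message is downgraded to "user".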
messages: messages.map((m) => ({
role: m.role === "system" ? "user" : m.role,
content: m.content,
})),
}),
});
if (!response.ok) {
const error = await response.text();
throw new Error(`Anthropic API error (${response.status}): ${error}`);
}
const data = (await response.json()) as {
content: Array<{ type: string; text: string }>;
};
return data.content
.filter((c) => c.type === "text")
.map((c) => c.text)
.join("");
}
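/** Calls a local Ollama server's /api/chat endpoint with stream: false (single JSON response). */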
async function callOllama(
messages: ChatRequestBody["messages"],
systemPrompt: string,
config: ReturnType<typeof getConfig>,
): Promise<string> {
const baseUrl = config.baseUrl || "http://localhost:11434";
const model = config.model || "llama3.2";
const response = await fetch(`${baseUrl}/api/chat`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
model,
messages: [{ role: "system", content: systemPrompt }, ...messages],
stream: false,
}),
});
if (!response.ok) {
const error = await response.text();
throw new Error(`Ollama API error (${response.status}): ${error}`);
}
const data = (await response.json()) as {
message: { content: string };
};
return data.message?.content ?? "";
}
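/** Canned reply for "demo" mode (no real provider configured); picks one of two fixed messages. */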
function getDemoResponse(): string {
const responses = [
"Modulul AI Chat funcționează în mod demonstrativ. Pentru a activa răspunsuri reale, configurați variabilele de mediu:\n\n" +
"- `AI_PROVIDER`: openai / anthropic / ollama\n" +
"- `AI_API_KEY`: cheia API\n" +
"- `AI_MODEL`: modelul dorit (opțional)\n\n" +
"Consultați documentația pentru detalii.",
"Aceasta este o conversație demonstrativă. Mesajele sunt salvate, dar răspunsurile AI nu sunt generate fără o conexiune API configurată.",
];
return (
responses[Math.floor(Math.random() * responses.length)] ?? responses[0]!
);
}
/**
* GET /api/ai-chat — Return provider config (without API key)
*/
export async function GET() {
const config = getConfig();
return NextResponse.json({
provider: config.provider,
model: config.model || "(default)",
baseUrl: config.baseUrl || "(default)",
maxTokens: config.maxTokens,
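    // Ollama runs locally without a key; the other real providers need AI_API_KEY.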
isConfigured:
config.provider !== "demo" &&
(config.provider === "ollama" || !!config.apiKey),
});
}
/**
* POST /api/ai-chat — Send messages and get AI response
*/
export async function POST(request: NextRequest) {
const config = getConfig();
let body: ChatRequestBody;
try {
body = (await request.json()) as ChatRequestBody;
} catch {
return NextResponse.json({ error: "invalid_json" }, { status: 400 });
}
if (!body.messages || body.messages.length === 0) {
return NextResponse.json({ error: "no_messages" }, { status: 400 });
}
const systemPrompt = body.systemPrompt ?? DEFAULT_SYSTEM_PROMPT;
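  // Dispatch to the configured provider; anything other than openai/anthropic/ollama
  // falls back to the demo reply.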
try {
let responseText: string;
switch (config.provider) {
case "openai":
if (!config.apiKey) {
return NextResponse.json(
{
error: "missing_api_key",
message: "AI_API_KEY nu este configurat.",
},
{ status: 500 },
);
}
responseText = await callOpenAI(body.messages, systemPrompt, config);
break;
case "anthropic":
if (!config.apiKey) {
return NextResponse.json(
{
error: "missing_api_key",
message: "AI_API_KEY nu este configurat.",
},
{ status: 500 },
);
}
responseText = await callAnthropic(body.messages, systemPrompt, config);
break;
case "ollama":
responseText = await callOllama(body.messages, systemPrompt, config);
break;
default:
// Demo mode
responseText = getDemoResponse();
break;
}
return NextResponse.json({
content: responseText,
provider: config.provider,
model: config.model || "(default)",
timestamp: new Date().toISOString(),
});
} catch (error) {
return NextResponse.json(
{
error: "api_error",
message:
error instanceof Error
? error.message
: "Eroare necunoscută la apelul API AI.",
provider: config.provider,
},
{ status: 502 },
);
}
}