Release v2.2.4: AI prompt test feature + updateto.sh path fix
This commit is contained in:
parent
abd5014eb0
commit
8d7d32571a
@ -6,6 +6,9 @@ from fastapi import APIRouter, HTTPException
|
||||
from typing import List, Optional, Dict
|
||||
from pydantic import BaseModel
|
||||
from app.core.database import execute_query
|
||||
from app.core.config import settings
|
||||
import httpx
|
||||
import time
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -474,15 +477,11 @@ Output: [Liste af opgaver]""",
|
||||
}
|
||||
|
||||
|
||||
|
||||
@router.get("/ai-prompts", tags=["Settings"])
|
||||
async def get_ai_prompts():
|
||||
"""Get all AI prompts (defaults merged with custom overrides)"""
|
||||
def _get_prompts_with_overrides() -> Dict:
|
||||
"""Get AI prompts with DB overrides applied"""
|
||||
prompts = _get_default_prompts()
|
||||
|
||||
|
||||
try:
|
||||
# Check for custom overrides in DB
|
||||
# Note: Table ai_prompts must rely on migration 066
|
||||
rows = execute_query("SELECT key, prompt_text FROM ai_prompts")
|
||||
if rows:
|
||||
for row in rows:
|
||||
@ -491,14 +490,40 @@ async def get_ai_prompts():
|
||||
prompts[row['key']]['is_custom'] = True
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load custom ai prompts: {e}")
|
||||
|
||||
|
||||
return prompts
|
||||
|
||||
|
||||
def _get_test_input_for_prompt(key: str) -> str:
|
||||
"""Default test input per prompt type"""
|
||||
examples = {
|
||||
"invoice_extraction": "FAKTURA 2026-1001 fra Demo A/S. CVR 12345678. Total 1.250,00 DKK inkl moms.",
|
||||
"ticket_classification": "Emne: Kan ikke logge på VPN. Beskrivelse: Flere brugere er ramt siden i morges.",
|
||||
"ticket_summary": "Bruger havde netværksfejl. Router genstartet og DNS opdateret. Forbindelse virker nu stabilt.",
|
||||
"kb_generation": "Problem: Outlook åbner ikke. Løsning: Reparer Office installation og nulstil profil.",
|
||||
"troubleshooting_assistant": "Server svarer langsomt efter opdatering. CPU er høj, disk IO er normal.",
|
||||
"sentiment_analysis": "Jeg er meget frustreret, systemet er nede igen og vi mister kunder!",
|
||||
"meeting_action_items": "Peter opdaterer firewall fredag. Anna sender status til kunden mandag.",
|
||||
}
|
||||
return examples.get(key, "Skriv kort: AI test OK")
|
||||
|
||||
|
||||
|
||||
@router.get("/ai-prompts", tags=["Settings"])
async def get_ai_prompts():
    """Return all AI prompts: built-in defaults merged with custom DB overrides."""
    merged = _get_prompts_with_overrides()
    return merged
|
||||
|
||||
|
||||
class PromptUpdate(BaseModel):
    """Request body for PUT /ai-prompts/{key}: override a system prompt."""
    # The full replacement prompt text to store for the given key.
    prompt_text: str
|
||||
|
||||
|
||||
class PromptTestRequest(BaseModel):
    """Request body for POST /ai-prompts/{key}/test."""
    # Optional user input to test against; when omitted, a canned example
    # from _get_test_input_for_prompt(key) is used.
    test_input: Optional[str] = None
    # Optional prompt text (e.g. unsaved edits); when omitted, the stored
    # prompt for the key is used.
    prompt_text: Optional[str] = None
|
||||
|
||||
|
||||
@router.put("/ai-prompts/{key}", tags=["Settings"])
|
||||
async def update_ai_prompt(key: str, update: PromptUpdate):
|
||||
"""Override a system prompt with a custom one"""
|
||||
@ -533,3 +558,84 @@ async def reset_ai_prompt(key: str):
|
||||
raise HTTPException(status_code=500, detail="Could not reset prompt")
|
||||
|
||||
|
||||
@router.post("/ai-prompts/{key}/test", tags=["Settings"])
async def test_ai_prompt(key: str, payload: PromptTestRequest):
    """Run a quick AI test for a specific system prompt.

    Resolves the prompt (the request body may override the stored text),
    sends it together with a sample input to the configured Ollama-style
    endpoint, and returns the model's reply plus round-trip latency.

    Raises:
        HTTPException 404: unknown prompt key.
        HTTPException 400: resolved prompt text or test input is empty.
        HTTPException 502: AI endpoint returned non-200 or an empty reply.
        HTTPException 500: any other failure while contacting the endpoint.
    """
    prompts = _get_prompts_with_overrides()
    if key not in prompts:
        raise HTTPException(status_code=404, detail="Unknown prompt key")

    prompt_cfg = prompts[key]
    # Per-prompt model/endpoint settings win; fall back to global config.
    model = prompt_cfg.get("model") or settings.OLLAMA_MODEL
    endpoint = prompt_cfg.get("endpoint") or settings.OLLAMA_ENDPOINT
    # Caller-supplied prompt (e.g. unsaved UI edits) takes precedence over the stored one.
    prompt_text = (payload.prompt_text or prompt_cfg.get("prompt") or "").strip()
    if not prompt_text:
        raise HTTPException(status_code=400, detail="Prompt text is empty")

    # Use the canned per-key example when the caller gives no test input.
    test_input = (payload.test_input or _get_test_input_for_prompt(key)).strip()
    if not test_input:
        raise HTTPException(status_code=400, detail="Test input is empty")

    start = time.perf_counter()
    try:
        # Route qwen3-family models through the chat API, everything else through
        # /api/generate. NOTE(review): assumes only qwen3* needs /api/chat — confirm.
        use_chat_api = model.startswith("qwen3")

        async with httpx.AsyncClient(timeout=60.0) as client:
            if use_chat_api:
                response = await client.post(
                    f"{endpoint}/api/chat",
                    json={
                        "model": model,
                        "messages": [
                            {"role": "system", "content": prompt_text},
                            {"role": "user", "content": test_input},
                        ],
                        "stream": False,
                        # Low temperature + capped token count keeps the test cheap and stable.
                        "options": {"temperature": 0.2, "num_predict": 600},
                    },
                )
            else:
                response = await client.post(
                    f"{endpoint}/api/generate",
                    json={
                        "model": model,
                        # /api/generate has no system role, so prompt and input are concatenated.
                        "prompt": f"{prompt_text}\n\nBrugerinput:\n{test_input}",
                        "stream": False,
                        "options": {"temperature": 0.2, "num_predict": 600},
                    },
                )

        if response.status_code != 200:
            # Truncate the upstream body so the error detail stays bounded.
            raise HTTPException(
                status_code=502,
                detail=f"AI endpoint fejl: {response.status_code} - {response.text[:300]}",
            )

        data = response.json()
        if use_chat_api:
            message_data = data.get("message", {})
            # Fall back to the "thinking" field when "content" is empty —
            # presumably for reasoning models; verify against the model's output format.
            ai_response = (message_data.get("content") or message_data.get("thinking") or "").strip()
        else:
            ai_response = (data.get("response") or "").strip()

        if not ai_response:
            raise HTTPException(status_code=502, detail="AI returnerede tomt svar")

        latency_ms = int((time.perf_counter() - start) * 1000)
        return {
            "ok": True,
            "key": key,
            "model": model,
            "endpoint": endpoint,
            "test_input": test_input,
            "ai_response": ai_response,
            "latency_ms": latency_ms,
        }

    except HTTPException:
        # Let the deliberate 4xx/5xx responses above pass through unchanged.
        raise
    except Exception as e:
        logger.error(f"❌ AI prompt test failed for {key}: {e}")
        raise HTTPException(status_code=500, detail=f"Kunne ikke teste AI prompt: {str(e)}")
|
||||
|
||||
|
||||
|
||||
@ -2775,6 +2775,9 @@ async function loadAIPrompts() {
|
||||
<button class="btn btn-outline-danger" onclick="resetPrompt('${key}')" title="Nulstil til standard">
|
||||
<i class="bi bi-arrow-counterclockwise"></i> Nulstil
|
||||
</button>` : ''}
|
||||
<button class="btn btn-outline-success" onclick="testPrompt('${key}')" id="testBtn_${key}" title="Test AI prompt">
|
||||
<i class="bi bi-play-circle"></i> Test
|
||||
</button>
|
||||
<button class="btn btn-outline-primary" onclick="editPrompt('${key}')" id="editBtn_${key}" title="Rediger Prompt">
|
||||
<i class="bi bi-pencil"></i> Rediger
|
||||
</button>
|
||||
@ -2788,6 +2791,8 @@ async function loadAIPrompts() {
|
||||
style="max-height: 400px; overflow-y: auto; font-size: 0.85rem; white-space: pre-wrap; border-radius: 0;">${escapeHtml(prompt.prompt)}</pre>
|
||||
<textarea id="edit_prompt_${key}" class="form-control d-none p-3 bg-white text-dark rounded-bottom"
|
||||
style="height: 300px; font-family: monospace; font-size: 0.85rem; border-radius: 0;">${escapeHtml(prompt.prompt)}</textarea>
|
||||
|
||||
<div id="testResult_${key}" class="alert alert-secondary m-3 py-2 px-3 d-none" style="white-space: pre-wrap; font-size: 0.85rem;"></div>
|
||||
|
||||
<div id="editActions_${key}" class="position-absolute bottom-0 end-0 p-3 d-none">
|
||||
<button class="btn btn-sm btn-secondary me-1" onclick="cancelEdit('${key}')">Annuller</button>
|
||||
@ -2882,6 +2887,52 @@ async function resetPrompt(key) {
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Run a server-side test of the AI prompt identified by `key` and render the
 * outcome inline. The Test button is disabled and shows a spinner while the
 * request is in flight, and is always restored afterwards.
 */
async function testPrompt(key) {
    const btn = document.getElementById(`testBtn_${key}`);
    const resultElement = document.getElementById(`testResult_${key}`);
    const editElement = document.getElementById(`edit_prompt_${key}`);

    // Send the textarea content so unsaved edits are what gets tested.
    const promptText = editElement ? editElement.value : '';
    const originalHtml = btn.innerHTML;

    btn.disabled = true;
    btn.innerHTML = '<span class="spinner-border spinner-border-sm me-1" role="status" aria-hidden="true"></span>Tester';

    // Show a neutral "testing" state while waiting for the backend.
    resultElement.className = 'alert alert-secondary m-3 py-2 px-3';
    resultElement.classList.remove('d-none');
    resultElement.textContent = 'Tester AI...';

    try {
        const response = await fetch(`/api/v1/ai-prompts/${key}/test`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ prompt_text: promptText })
        });

        if (!response.ok) {
            const message = await getErrorMessage(response, 'Kunne ikke teste AI prompt');
            throw new Error(message);
        }

        const result = await response.json();
        const fullResponse = (result.ai_response || '').trim();
        // Truncate very long answers so the inline alert stays readable.
        const preview = fullResponse.length > 1200 ? `${fullResponse.slice(0, 1200)}\n...` : fullResponse;

        resultElement.className = 'alert alert-success m-3 py-2 px-3';
        resultElement.textContent =
            `✅ AI svar modtaget (${result.latency_ms} ms)\n` +
            `Model: ${result.model}\n\n` +
            `${preview || '[Tomt svar]'}`;
    } catch (error) {
        console.error('Error testing AI prompt:', error);
        resultElement.className = 'alert alert-danger m-3 py-2 px-3';
        resultElement.textContent = `❌ ${error.message || 'Kunne ikke teste AI prompt'}`;
    } finally {
        // Always restore the button, success or failure.
        btn.disabled = false;
        btn.innerHTML = originalHtml;
    }
}
|
||||
|
||||
|
||||
|
||||
function copyPrompt(key) {
|
||||
|
||||
35
updateto.sh
35
updateto.sh
@ -28,10 +28,10 @@ fi
|
||||
CURRENT_IP=$(hostname -I | awk '{print $1}' 2>/dev/null || echo "unknown")
|
||||
CURRENT_DIR=$(pwd)
|
||||
|
||||
if [[ "$CURRENT_IP" != "172.16.31.183" ]] && [[ "$CURRENT_DIR" != "/srv/podman/bmc_hub_v1.0" ]]; then
|
||||
if [[ "$CURRENT_IP" != "172.16.31.183" ]] && [[ "$CURRENT_DIR" != "/srv/podman/bmc_hub_v2" ]]; then
|
||||
echo "⚠️ ADVARSEL: Dette script skal kun køres på PRODUCTION serveren!"
|
||||
echo " Forventet IP: 172.16.31.183"
|
||||
echo " Forventet mappe: /srv/podman/bmc_hub_v1.0"
|
||||
echo " Forventet mappe: /srv/podman/bmc_hub_v2"
|
||||
echo " Nuværende IP: $CURRENT_IP"
|
||||
echo " Nuværende mappe: $CURRENT_DIR"
|
||||
echo ""
|
||||
@ -87,13 +87,30 @@ podman-compose -f "$PODMAN_COMPOSE_FILE" up -d --build
|
||||
# Sync migrations from container to host
|
||||
echo ""
|
||||
echo "📁 Syncer migrations fra container til host..."
|
||||
if podman cp bmc-hub-api-prod:/app/migrations ./migrations_temp 2>/dev/null; then
|
||||
rm -rf ./migrations
|
||||
mv ./migrations_temp ./migrations
|
||||
chmod -R 755 ./migrations
|
||||
echo "✅ Migrations synced"
|
||||
else
|
||||
echo "⚠️ Warning: Could not sync migrations (container might not be ready yet)"
|
||||
# Retry the migration sync: the API container may not be ready immediately
# after `podman-compose up -d`, so poll up to 20 times (~40s) before giving up.
SYNC_OK=false
for i in {1..20}; do
    # Drop any partial copy left over from a previous failed attempt.
    rm -rf ./migrations_temp
    if podman cp bmc-hub-api-prod:/app/migrations ./migrations_temp 2>/dev/null; then
        # Replace the host copy atomically-ish: copy to temp, then swap in.
        rm -rf ./migrations
        mv ./migrations_temp ./migrations
        chmod -R 755 ./migrations
        SYNC_OK=true
        echo "✅ Migrations synced"
        break
    fi
    echo "⏳ Venter på API container for migration sync... ($i/20)"
    sleep 2
done

# Abort the deploy rather than run stale migrations from the old checkout.
if [ "$SYNC_OK" != "true" ]; then
    echo "❌ Fejl: Kunne ikke sync'e migrations fra bmc-hub-api-prod:/app/migrations"
    echo "   Afbryder for at undgå kørsel af gamle migrations"
    exit 1
fi

# Sanity check: the newest expected migration must exist after the sync.
if [ ! -f "./migrations/138_customers_economic_unique_constraint.sql" ]; then
    echo "❌ Fejl: Forventet migration 138 mangler efter sync"
    exit 1
fi
|
||||
|
||||
# Wait a bit for startup
|
||||
|
||||
Loading…
Reference in New Issue
Block a user