Remove KI-Moderator, add more blacklist entries
@@ -44,7 +44,14 @@ async function loadBlacklistFromDB() {
     offensive: [],
     titles: [],
     brands: [],
-    inappropriate: []
+    inappropriate: [],
+    racial: [],
+    religious: [],
+    disability: [],
+    leetspeak: [],
+    cyberbullying: [],
+    drugs: [],
+    violence: []
   };

   // Create a new trigram index
@@ -101,7 +108,14 @@ function getStaticBlacklist() {
       'sex', 'porn', 'porno', 'fuck', 'shit',
       'bitch', 'whore', 'prostitute',
       'drug', 'cocaine', 'heroin', 'marijuana'
-    ]
+    ],
+    racial: [],
+    religious: [],
+    disability: [],
+    leetspeak: [],
+    cyberbullying: [],
+    drugs: [],
+    violence: []
   };
 }
@@ -219,7 +233,14 @@ function getCategoryReason(category) {
     offensive: 'Beleidigender oder anstößiger Begriff',
     titles: 'Titel oder Berufsbezeichnung',
     brands: 'Markenname',
-    inappropriate: 'Unpassender Begriff'
+    inappropriate: 'Unpassender Begriff',
+    racial: 'Rassistischer oder ethnisch beleidigender Begriff',
+    religious: 'Religiös beleidigender oder blasphemischer Begriff',
+    disability: 'Beleidigender Begriff bezüglich Behinderungen',
+    leetspeak: 'Verschleierter beleidigender Begriff',
+    cyberbullying: 'Cyberbullying oder Online-Belästigung',
+    drugs: 'Drogenbezogener Begriff',
+    violence: 'Gewalt- oder bedrohungsbezogener Begriff'
   };

   return reasons[category] || 'Unzulässiger Begriff';
@@ -1,253 +0,0 @@
-/**
- * LLM-based blacklist check using Ollama
- * Uses a local LLM for intelligent evaluation of names
- */
-
-const axios = require('axios');
-
-// Ollama configuration
-const OLLAMA_BASE_URL = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
-const OLLAMA_MODEL = process.env.OLLAMA_MODEL || 'llama3.2:3b'; // Fast, small model
-
-/**
- * Checks a name with the LLM
- * @param {string} firstname - First name
- * @param {string} lastname - Last name
- * @returns {Object} - {isBlocked: boolean, reason: string, confidence: number}
- */
-async function checkNameWithLLM(firstname, lastname) {
-  if (!firstname || !lastname) {
-    return { isBlocked: false, reason: '', confidence: 0 };
-  }
-
-  try {
-    const fullName = `${firstname} ${lastname}`;
-
-    // Prompt for the LLM
-    const prompt = `Du bist ein strenger Moderator für ein Spielsystem. Prüfe ob der Name "${fullName}" für die Verwendung geeignet ist.
-
-WICHTIG: Blockiere ALLE Namen die:
-- Historisch belastet sind (Adolf Hitler, Stalin, Mussolini, etc.)
-- Beleidigend oder anstößig sind (Satan, Idiot, etc.)
-- Unpassende Titel sind (Dr., Professor, etc.)
-- Markennamen sind (Coca-Cola, Nike, etc.)
-- Andere unangemessene Inhalte haben
-
-Antworte NUR mit "TRUE" (blockiert) oder "FALSE" (erlaubt) - keine Erklärungen.
-
-Name: "${fullName}"
-Antwort:`;
-
-    // Ollama API call
-    const response = await axios.post(`${OLLAMA_BASE_URL}/api/generate`, {
-      model: OLLAMA_MODEL,
-      prompt: prompt,
-      stream: false,
-      options: {
-        temperature: 0.1, // Low temperature for consistent answers
-        top_p: 0.9,
-        max_tokens: 10 // Only TRUE/FALSE expected
-      }
-    }, {
-      timeout: 10000 // 10-second timeout
-    });
-
-    const llmResponse = response.data.response.trim().toUpperCase();
-
-    // Parse the LLM answer
-    let isBlocked = false;
-    let reason = '';
-    let confidence = 0.8; // Default confidence for the LLM
-
-    if (llmResponse === 'TRUE') {
-      isBlocked = true;
-      reason = 'Name wurde vom KI-Moderator als ungeeignet eingestuft';
-    } else if (llmResponse === 'FALSE') {
-      isBlocked = false;
-      reason = 'Name wurde vom KI-Moderator als geeignet eingestuft';
-    } else {
-      // Fallback for an unexpected answer
-      console.warn(`Unerwartete LLM-Antwort: "${llmResponse}" für Name: "${fullName}"`);
-      isBlocked = false;
-      reason = 'KI-Moderator konnte Name nicht eindeutig bewerten';
-      confidence = 0.3;
-    }
-
-    return {
-      isBlocked,
-      reason,
-      confidence,
-      llmResponse: llmResponse,
-      matchType: 'llm'
-    };
-
-  } catch (error) {
-    console.error('Error checking name with LLM:', error);
-
-    // Fallback on LLM errors
-    return {
-      isBlocked: false,
-      reason: 'KI-Moderator nicht verfügbar - Name wurde erlaubt',
-      confidence: 0.1,
-      error: error.message,
-      matchType: 'llm-error'
-    };
-  }
-}
-
-/**
- * Tests the LLM connection
- * @returns {Object} - {connected: boolean, model: string, error?: string}
- */
-async function testLLMConnection() {
-  try {
-    const response = await axios.post(`${OLLAMA_BASE_URL}/api/generate`, {
-      model: OLLAMA_MODEL,
-      prompt: 'Test',
-      stream: false,
-      options: {
-        max_tokens: 1
-      }
-    }, {
-      timeout: 5000
-    });
-
-    return {
-      connected: true,
-      model: OLLAMA_MODEL,
-      baseUrl: OLLAMA_BASE_URL
-    };
-  } catch (error) {
-    return {
-      connected: false,
-      model: OLLAMA_MODEL,
-      baseUrl: OLLAMA_BASE_URL,
-      error: error.message
-    };
-  }
-}
-
-/**
- * Extended LLM check with context
- * @param {string} firstname - First name
- * @param {string} lastname - Last name
- * @param {string} context - Additional context (optional)
- * @returns {Object} - Check result
- */
-async function checkNameWithContext(firstname, lastname, context = '') {
-  if (!firstname || !lastname) {
-    return { isBlocked: false, reason: '', confidence: 0 };
-  }
-
-  try {
-    const fullName = `${firstname} ${lastname}`;
-
-    // Extended prompt with context
-    const prompt = `Du bist ein Moderator für ein Spielsystem. Prüfe ob der Name "${fullName}" für die Verwendung geeignet ist.
-
-Kontext: ${context || 'Standard-Spielname'}
-
-Beurteile den Namen basierend auf:
-- Historisch belastete Namen (z.B. Adolf Hitler, Stalin, etc.)
-- Beleidigende oder anstößige Begriffe
-- Unpassende Titel oder Berufsbezeichnungen
-- Markennamen die nicht verwendet werden sollten
-- Andere unangemessene Inhalte
-
-Antworte NUR mit "TRUE" oder "FALSE" - keine Erklärungen.
-
-Name: "${fullName}"
-Antwort:`;
-
-    const response = await axios.post(`${OLLAMA_BASE_URL}/api/generate`, {
-      model: OLLAMA_MODEL,
-      prompt: prompt,
-      stream: false,
-      options: {
-        temperature: 0.1,
-        top_p: 0.9,
-        max_tokens: 10
-      }
-    }, {
-      timeout: 10000
-    });
-
-    const llmResponse = response.data.response.trim().toUpperCase();
-
-    let isBlocked = false;
-    let reason = '';
-    let confidence = 0.8;
-
-    if (llmResponse === 'TRUE') {
-      isBlocked = true;
-      reason = 'Name wurde vom KI-Moderator als ungeeignet eingestuft';
-    } else if (llmResponse === 'FALSE') {
-      isBlocked = false;
-      reason = 'Name wurde vom KI-Moderator als geeignet eingestuft';
-    } else {
-      console.warn(`Unerwartete LLM-Antwort: "${llmResponse}" für Name: "${fullName}"`);
-      isBlocked = false;
-      reason = 'KI-Moderator konnte Name nicht eindeutig bewerten';
-      confidence = 0.3;
-    }
-
-    return {
-      isBlocked,
-      reason,
-      confidence,
-      llmResponse: llmResponse,
-      matchType: 'llm-context',
-      context: context
-    };
-
-  } catch (error) {
-    console.error('Error checking name with LLM context:', error);
-
-    return {
-      isBlocked: false,
-      reason: 'KI-Moderator nicht verfügbar - Name wurde erlaubt',
-      confidence: 0.1,
-      error: error.message,
-      matchType: 'llm-error'
-    };
-  }
-}
-
-/**
- * Batch check of several names
- * @param {Array} names - Array of {firstname, lastname} objects
- * @returns {Array} - Array of check results
- */
-async function checkNamesBatch(names) {
-  const results = [];
-
-  for (const name of names) {
-    try {
-      const result = await checkNameWithLLM(name.firstname, name.lastname);
-      results.push({
-        ...name,
-        ...result
-      });
-    } catch (error) {
-      results.push({
-        ...name,
-        isBlocked: false,
-        reason: 'Fehler bei der Prüfung',
-        confidence: 0,
-        error: error.message,
-        matchType: 'error'
-      });
-    }
-  }
-
-  return results;
-}
-
-module.exports = {
-  checkNameWithLLM,
-  checkNameWithContext,
-  checkNamesBatch,
-  testLLMConnection,
-  OLLAMA_BASE_URL,
-  OLLAMA_MODEL
-};
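
Aside on the module deleted above: both of its Ollama calls pass `max_tokens` inside `options`, but Ollama's native /api/generate endpoint documents its token cap as `num_predict` (`max_tokens` is the OpenAI-style name), so the intended 10-token cap was likely never enforced. Had the module been kept, the corrected call would look roughly like this (a sketch, not part of the commit):

    // Sketch only: same call as the removed checkNameWithLLM, with Ollama's
    // documented option name for the token cap. OLLAMA_BASE_URL, OLLAMA_MODEL
    // and prompt are as defined in the deleted module.
    const response = await axios.post(`${OLLAMA_BASE_URL}/api/generate`, {
      model: OLLAMA_MODEL,
      prompt: prompt,
      stream: false,
      options: {
        temperature: 0.1,
        top_p: 0.9,
        num_predict: 10 // Ollama's name for the token limit
      }
    }, {
      timeout: 10000
    });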
@@ -106,12 +106,6 @@
       <button class="btn" onclick="showBlacklistManagement()">Blacklist verwalten</button>
     </div>

-    <!-- KI-Moderator -->
-    <div class="card">
-      <h3><span class="icon">🧠</span> KI-Moderator</h3>
-      <p>Intelligente Namensprüfung mit Ollama LLM</p>
-      <button class="btn" onclick="showLLMManagement()">KI-Moderator testen</button>
-    </div>

     <!-- Run management -->
     <div class="card">
@@ -211,6 +205,13 @@
       <option value="titles">Titel/Berufsbezeichnung</option>
       <option value="brands">Markenname</option>
       <option value="inappropriate">Unpassend</option>
+      <option value="racial">Rassistisch/ethnisch</option>
+      <option value="religious">Religiös beleidigend</option>
+      <option value="disability">Behinderungsbezogen</option>
+      <option value="leetspeak">Verschleiert</option>
+      <option value="cyberbullying">Cyberbullying</option>
+      <option value="drugs">Drogenbezogen</option>
+      <option value="violence">Gewalt/Bedrohung</option>
     </select>
     <button class="btn btn-success" onclick="addToBlacklist()">Hinzufügen</button>
   </div>
@@ -237,49 +238,6 @@
     </div>
   </footer>

-  <!-- LLM moderator modal -->
-  <div id="llmModal" class="modal">
-    <div class="modal-content">
-      <span class="close" onclick="closeLLMModal()">×</span>
-      <h2>🧠 KI-Moderator</h2>
-
-      <!-- LLM status -->
-      <div id="llmStatus" class="llm-status-section">
-        <h3>Status</h3>
-        <div id="llmStatusContent">Lade...</div>
-      </div>
-
-      <!-- Test a name -->
-      <div class="llm-test-section">
-        <h3>Name testen</h3>
-        <div class="form-group">
-          <label for="llmFirstname">Vorname:</label>
-          <input type="text" id="llmFirstname" placeholder="Vorname eingeben">
-        </div>
-        <div class="form-group">
-          <label for="llmLastname">Nachname:</label>
-          <input type="text" id="llmLastname" placeholder="Nachname eingeben">
-        </div>
-        <div class="form-group">
-          <label for="llmContext">Kontext (optional):</label>
-          <input type="text" id="llmContext" placeholder="Zusätzlicher Kontext">
-        </div>
-        <button onclick="testNameWithLLM()" class="btn btn-primary">Mit KI prüfen</button>
-      </div>
-
-      <!-- Result -->
-      <div id="llmResult" class="llm-result-section" style="display: none;">
-        <h3>Ergebnis</h3>
-        <div id="llmResultContent"></div>
-      </div>
-
-      <!-- Comparison with blacklist -->
-      <div id="llmComparison" class="llm-comparison-section" style="display: none;">
-        <h3>Vergleich mit Blacklist</h3>
-        <div id="llmComparisonContent"></div>
-      </div>
-    </div>
-  </div>

   <!-- Application JavaScript -->
   <script src="/js/cookie-consent.js"></script>
@@ -893,7 +893,14 @@ function getCategoryIcon(category) {
     offensive: '⚠️',
     titles: '👑',
     brands: '🏷️',
-    inappropriate: '🚫'
+    inappropriate: '🚫',
+    racial: '🌍',
+    religious: '⛪',
+    disability: '♿',
+    leetspeak: '🔤',
+    cyberbullying: '💻',
+    drugs: '💊',
+    violence: '⚔️'
   };
   return icons[category] || '📝';
 }
@@ -904,7 +911,14 @@ function getCategoryDisplayName(category) {
    offensive: 'Beleidigend/anstößig',
    titles: 'Titel/Berufsbezeichnung',
    brands: 'Markenname',
-    inappropriate: 'Unpassend'
+    inappropriate: 'Unpassend',
+    racial: 'Rassistisch/ethnisch',
+    religious: 'Religiös beleidigend',
+    disability: 'Behinderungsbezogen',
+    leetspeak: 'Verschleiert',
+    cyberbullying: 'Cyberbullying',
+    drugs: 'Drogenbezogen',
+    violence: 'Gewalt/Bedrohung'
   };
   return names[category] || category;
 }
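
Note: with the hunks so far, this commit spells out the same seven new category keys by hand in five maps (loadBlacklistFromDB, getStaticBlacklist, getCategoryReason, getCategoryIcon, getCategoryDisplayName) plus the `<option>` values of the admin form. A hypothetical follow-up could keep them in one list so the maps cannot drift apart (a sketch, not part of the commit):

    // Hypothetical: single source of truth for the blacklist categories.
    const CATEGORIES = [
      'offensive', 'titles', 'brands', 'inappropriate',
      'racial', 'religious', 'disability', 'leetspeak',
      'cyberbullying', 'drugs', 'violence'
    ];

    // Build the empty category map used by loadBlacklistFromDB and
    // getStaticBlacklist instead of listing every key again.
    const emptyBlacklist = () =>
      Object.fromEntries(CATEGORIES.map(c => [c, []]));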
@@ -1103,196 +1117,7 @@ function displayBlacklistStats(stats) {
   statsDiv.innerHTML = html;
 }

-// LLM management
-function showLLMManagement() {
-  document.getElementById('llmModal').style.display = 'block';
-  loadLLMStatus();
-}
-
-function closeLLMModal() {
-  document.getElementById('llmModal').style.display = 'none';
-}
-
-// Load LLM status
-async function loadLLMStatus() {
-  try {
-    const response = await fetch('/api/v1/admin/llm/status', {
-      headers: {
-        'Authorization': `Bearer ${localStorage.getItem('adminToken')}`
-      }
-    });
-
-    const result = await response.json();
-    const statusContent = document.getElementById('llmStatusContent');
-
-    if (result.success) {
-      const status = result.data;
-      if (status.connected) {
-        statusContent.innerHTML = `
-          <div style="background: #e8f5e8; color: #2e7d32; padding: 0.5rem; border-radius: 3px;">
-            <strong>✅ KI-Moderator verbunden</strong><br>
-            Modell: ${status.model}<br>
-            URL: ${status.baseUrl}
-          </div>
-        `;
-      } else {
-        statusContent.innerHTML = `
-          <div style="background: #ffebee; color: #c62828; padding: 0.5rem; border-radius: 3px;">
-            <strong>❌ KI-Moderator nicht verfügbar</strong><br>
-            Modell: ${status.model}<br>
-            URL: ${status.baseUrl}<br>
-            Fehler: ${status.error}
-          </div>
-        `;
-      }
-    } else {
-      statusContent.innerHTML = `
-        <div style="background: #ffebee; color: #c62828; padding: 0.5rem; border-radius: 3px;">
-          <strong>❌ Fehler beim Laden des Status</strong><br>
-          ${result.message}
-        </div>
-      `;
-    }
-  } catch (error) {
-    console.error('Error loading LLM status:', error);
-    document.getElementById('llmStatusContent').innerHTML = `
-      <div style="background: #ffebee; color: #c62828; padding: 0.5rem; border-radius: 3px;">
-        <strong>❌ Fehler beim Laden des Status</strong><br>
-        ${error.message}
-      </div>
-    `;
-  }
-}
-
-// Test a name with the LLM
-async function testNameWithLLM() {
-  const firstname = document.getElementById('llmFirstname').value.trim();
-  const lastname = document.getElementById('llmLastname').value.trim();
-  const context = document.getElementById('llmContext').value.trim();
-
-  if (!firstname || !lastname) {
-    showLLMMessage('Bitte gib Vorname und Nachname ein', 'error');
-    return;
-  }
-
-  try {
-    // LLM test
-    const llmResponse = await fetch('/api/v1/admin/llm/test', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        'Authorization': `Bearer ${localStorage.getItem('adminToken')}`
-      },
-      body: JSON.stringify({ firstname, lastname, context })
-    });
-
-    const llmResult = await llmResponse.json();
-
-    // Blacklist test for comparison
-    const blacklistResponse = await fetch('/api/v1/admin/blacklist/test', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        'Authorization': `Bearer ${localStorage.getItem('adminToken')}`
-      },
-      body: JSON.stringify({ firstname, lastname })
-    });
-
-    const blacklistResult = await blacklistResponse.json();
-
-    // Display the results
-    displayLLMResults(llmResult, blacklistResult);
-
-  } catch (error) {
-    console.error('Error testing name with LLM:', error);
-    showLLMMessage('Fehler beim Testen: ' + error.message, 'error');
-  }
-}
-
-// Display the LLM results
-function displayLLMResults(llmResult, blacklistResult) {
-  const resultDiv = document.getElementById('llmResult');
-  const resultContent = document.getElementById('llmResultContent');
-  const comparisonDiv = document.getElementById('llmComparison');
-  const comparisonContent = document.getElementById('llmComparisonContent');
-
-  if (llmResult.success) {
-    const llm = llmResult.data;
-
-    let llmStatus = '';
-    if (llm.isBlocked) {
-      llmStatus = `
-        <div style="background: #ffebee; color: #c62828; padding: 0.5rem; border-radius: 3px;">
-          <strong>❌ Name blockiert</strong><br>
-          Grund: ${llm.reason}<br>
-          Konfidenz: ${Math.round(llm.confidence * 100)}%<br>
-          LLM-Antwort: "${llm.llmResponse}"<br>
-          Typ: ${llm.matchType}
-        </div>
-      `;
-    } else {
-      llmStatus = `
-        <div style="background: #e8f5e8; color: #2e7d32; padding: 0.5rem; border-radius: 3px;">
-          <strong>✅ Name erlaubt</strong><br>
-          Grund: ${llm.reason}<br>
-          Konfidenz: ${Math.round(llm.confidence * 100)}%<br>
-          LLM-Antwort: "${llm.llmResponse}"<br>
-          Typ: ${llm.matchType}
-        </div>
-      `;
-    }
-
-    resultContent.innerHTML = llmStatus;
-    resultDiv.style.display = 'block';
-
-    // Comparison with the blacklist
-    if (blacklistResult.success) {
-      const blacklist = blacklistResult.data;
-      let comparisonStatus = '';
-
-      if (blacklist.combined.isBlocked) {
-        comparisonStatus = `
-          <div style="background: #fff3e0; color: #f57c00; padding: 0.5rem; border-radius: 3px;">
-            <strong>⚠️ Name blockiert (${blacklist.combined.source})</strong><br>
-            Grund: ${blacklist.combined.reason}
-          </div>
-        `;
-      } else {
-        comparisonStatus = `
-          <div style="background: #e8f5e8; color: #2e7d32; padding: 0.5rem; border-radius: 3px;">
-            <strong>✅ Name erlaubt</strong><br>
-            Sowohl KI als auch Blacklist erlauben den Namen
-          </div>
-        `;
-      }
-
-      comparisonContent.innerHTML = comparisonStatus;
-      comparisonDiv.style.display = 'block';
-    }
-
-  } else {
-    resultContent.innerHTML = `
-      <div style="background: #ffebee; color: #c62828; padding: 0.5rem; border-radius: 3px;">
-        <strong>❌ Fehler beim Testen</strong><br>
-        ${llmResult.message}
-      </div>
-    `;
-    resultDiv.style.display = 'block';
-  }
-}
-
-// Show an LLM message
-function showLLMMessage(message, type) {
-  const resultDiv = document.getElementById('llmResult');
-  const resultContent = document.getElementById('llmResultContent');
-
-  const color = type === 'error' ? '#c62828' : '#2e7d32';
-  const bgColor = type === 'error' ? '#ffebee' : '#e8f5e8';
-
-  resultContent.innerHTML = `
-    <div style="background: ${bgColor}; color: ${color}; padding: 0.5rem; border-radius: 3px;">
-      <strong>${type === 'error' ? '❌' : '✅'} ${message}</strong>
-    </div>
-  `;
-  resultDiv.style.display = 'block';
-}
@@ -1019,7 +1019,6 @@ router.post('/v1/private/users/find', requireApiKey, async (req, res) => {

 // Import blacklist module
 const { checkNameAgainstBlacklist, addToBlacklist, removeFromBlacklist, getBlacklist } = require('../config/blacklist-db');
-const { checkNameWithLLM, checkNameWithContext, testLLMConnection } = require('../config/llm-blacklist');

 // Create new player with RFID and blacklist validation (no auth required for dashboard)
 router.post('/v1/public/players/create-with-rfid', async (req, res) => {
@@ -1034,27 +1033,13 @@ router.post('/v1/public/players/create-with-rfid', async (req, res) => {
   }

   try {
-    // LLM-based blacklist check
-    const llmCheck = await checkNameWithLLM(firstname, lastname);
-    if (llmCheck.isBlocked) {
-      return res.status(400).json({
-        success: false,
-        message: `Name nicht zulässig: ${llmCheck.reason}`,
-        details: llmCheck
-      });
-    }
-
-    // Fallback: traditional blacklist check (optional)
+    // Blacklist check using the Levenshtein algorithm
     const blacklistCheck = await checkNameAgainstBlacklist(firstname, lastname);
     if (blacklistCheck.isBlocked) {
       return res.status(400).json({
         success: false,
         message: `Name nicht zulässig: ${blacklistCheck.reason}`,
-        details: {
-          reason: blacklistCheck.reason,
-          category: blacklistCheck.category,
-          matchedTerm: blacklistCheck.matchedTerm
-        }
+        details: blacklistCheck
       });
     }
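
Note: the new comment names the Levenshtein algorithm, but the matching itself lives in checkNameAgainstBlacklist (config/blacklist-db.js) and is not part of this diff. As a rough, hypothetical sketch of that kind of fuzzy match against a blacklist term:

    // Hypothetical sketch of Levenshtein-based fuzzy matching; the real
    // implementation in blacklist-db.js may differ (thresholds, trigram
    // prefiltering, normalization).
    function levenshtein(a, b) {
      const dp = Array.from({ length: a.length + 1 }, (_, i) => [i]);
      for (let j = 1; j <= b.length; j++) dp[0][j] = j;
      for (let i = 1; i <= a.length; i++) {
        for (let j = 1; j <= b.length; j++) {
          dp[i][j] = Math.min(
            dp[i - 1][j] + 1,                                  // deletion
            dp[i][j - 1] + 1,                                  // insertion
            dp[i - 1][j - 1] + (a[i - 1] === b[j - 1] ? 0 : 1) // substitution
          );
        }
      }
      return dp[a.length][b.length];
    }

    // Treat a name part as blocked if it is within edit distance 1 of a term.
    const isFuzzyMatch = (word, term) =>
      levenshtein(word.toLowerCase(), term.toLowerCase()) <= 1;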
@@ -1618,22 +1603,16 @@ router.post('/v1/admin/blacklist/test', requireAdminAuth, async (req, res) => {
   }

   try {
-    // LLM check
-    const llmResult = await checkNameWithLLM(firstname, lastname);

-    // Traditional blacklist check
+    // Blacklist check using the Levenshtein algorithm
     const blacklistResult = await checkNameAgainstBlacklist(firstname, lastname);

     res.json({
       success: true,
       data: {
-        llm: llmResult,
-        blacklist: blacklistResult,
-        combined: {
-          isBlocked: llmResult.isBlocked || blacklistResult.isBlocked,
-          reason: llmResult.isBlocked ? llmResult.reason : blacklistResult.reason,
-          source: llmResult.isBlocked ? 'llm' : (blacklistResult.isBlocked ? 'blacklist' : 'none')
-        }
+        isBlocked: blacklistResult.isBlocked,
+        reason: blacklistResult.reason,
+        source: blacklistResult.isBlocked ? 'blacklist' : 'none'
       }
     });
   } catch (error) {
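
Note for consumers of /api/v1/admin/blacklist/test: the response is flattened here; data.llm, data.blacklist and data.combined are gone, and the verdict now sits directly on data. A client written against the new shape might look like this (a sketch; the endpoint, auth header and body mirror the removed frontend code):

    // Sketch: reading the flattened response of /api/v1/admin/blacklist/test.
    const firstname = 'Max', lastname = 'Mustermann'; // example input
    const res = await fetch('/api/v1/admin/blacklist/test', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${localStorage.getItem('adminToken')}`
      },
      body: JSON.stringify({ firstname, lastname })
    });
    const { data } = await res.json();
    // Previously: data.combined.isBlocked / data.combined.reason / data.combined.source
    console.log(data.isBlocked, data.reason, data.source);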