"""Router Audit Complet — import JSON, liste, detail, carte flux, carte applicative"""
|
|
import json
from urllib.parse import quote

from fastapi import APIRouter, Request, Depends, UploadFile, File
from fastapi.responses import HTMLResponse, RedirectResponse, StreamingResponse
from fastapi.templating import Jinja2Templates
from sqlalchemy import text

from ..dependencies import get_db, get_current_user, get_user_perms, can_view, base_context
from ..services.server_audit_full_service import (
    import_json_report, get_latest_audits, get_audit_detail,
    get_flow_map, get_flow_map_for_server, get_app_map,
)
from ..config import APP_NAME
|
|
|
|
router = APIRouter()
|
|
templates = Jinja2Templates(directory="app/templates")
|
|
|
|
|
|
@router.get("/audit-full", response_class=HTMLResponse)
|
|
async def audit_full_list(request: Request, db=Depends(get_db)):
|
|
user = get_current_user(request)
|
|
if not user:
|
|
return RedirectResponse(url="/login")
|
|
perms = get_user_perms(db, user)
|
|
if not can_view(perms, "audit"):
|
|
return RedirectResponse(url="/dashboard")
|
|
|
|
filtre = request.query_params.get("filter", "")
|
|
search = request.query_params.get("q", "").strip()
|
|
domain = request.query_params.get("domain", "")
|
|
page = int(request.query_params.get("page", "1"))
|
|
per_page = 20
|
|
|
|
# KPIs (toujours sur tout le jeu)
|
|
kpis = db.execute(text("""
|
|
SELECT
|
|
COUNT(*) as total,
|
|
COUNT(*) FILTER (WHERE reboot_required = true) as needs_reboot,
|
|
COUNT(*) FILTER (WHERE EXISTS (
|
|
SELECT 1 FROM jsonb_array_elements(disk_usage) d
|
|
WHERE (d->>'pct')::int >= 90
|
|
)) as disk_critical,
|
|
COUNT(*) FILTER (WHERE EXISTS (
|
|
SELECT 1 FROM jsonb_array_elements(disk_usage) d
|
|
WHERE (d->>'pct')::int >= 80 AND (d->>'pct')::int < 90
|
|
)) as disk_warning,
|
|
COUNT(*) FILTER (WHERE
|
|
uptime LIKE '%month%' OR uptime LIKE '%year%'
|
|
OR (uptime LIKE '%week%' AND (
|
|
CASE WHEN uptime ~ '(\d+) week' THEN (substring(uptime from '(\d+) week'))::int ELSE 0 END >= 17
|
|
))
|
|
) as uptime_long,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'postgres') as app_postgres,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'mariadb|mysqld') as app_mariadb,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'hdb|sapstart|HANA') as app_hana,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'oracle|ora_pmon' OR processes::text ~* 'ora_pmon|oracle') as app_oracle,
|
|
COUNT(*) FILTER (WHERE services::text ~* '"httpd"' OR listen_ports::text ~* '"httpd"') as app_httpd,
|
|
COUNT(*) FILTER (WHERE services::text ~* '"nginx"' OR listen_ports::text ~* '"nginx"') as app_nginx,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'haproxy') as app_haproxy,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'tomcat' OR processes::text ~* 'tomcat|catalina') as app_tomcat,
|
|
COUNT(*) FILTER (WHERE listen_ports::text ~* '"node"' OR processes::text ~* '/applis.*node') as app_nodejs,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'redis') as app_redis,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'mongod') as app_mongodb,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'elasticsearch|opensearch') as app_elastic,
|
|
COUNT(*) FILTER (WHERE services::text ~* 'docker|podman' OR processes::text ~* 'dockerd|podman') as app_container,
|
|
COUNT(*) FILTER (WHERE listen_ports::text ~* '"java"' OR processes::text ~* '\.jar') as app_java
|
|
FROM server_audit_full
|
|
WHERE status IN ('ok','partial')
|
|
AND id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)
|
|
""")).fetchone()
|
|
|
|
# Domaines + zones pour le filtre
|
|
all_domains = db.execute(text(
|
|
"SELECT code, name, 'domain' as type FROM domains ORDER BY name"
|
|
)).fetchall()
|
|
all_zones = db.execute(text(
|
|
"SELECT name as code, name, 'zone' as type FROM zones ORDER BY name"
|
|
)).fetchall()
|
|
|
|
# Requete avec filtres
|
|
audits = get_latest_audits(db, limit=9999)
|
|
|
|
# Filtre KPI
|
|
if filtre == "reboot":
|
|
audits = [a for a in audits if a.reboot_required]
|
|
elif filtre == "disk_critical":
|
|
ids = {r.id for r in db.execute(text("""
|
|
SELECT saf.id FROM server_audit_full saf
|
|
WHERE saf.status IN ('ok','partial') AND EXISTS (
|
|
SELECT 1 FROM jsonb_array_elements(saf.disk_usage) d WHERE (d->>'pct')::int >= 90
|
|
) AND saf.id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)
|
|
""")).fetchall()}
|
|
audits = [a for a in audits if a.id in ids]
|
|
elif filtre == "disk_warning":
|
|
ids = {r.id for r in db.execute(text("""
|
|
SELECT saf.id FROM server_audit_full saf
|
|
WHERE saf.status IN ('ok','partial') AND EXISTS (
|
|
SELECT 1 FROM jsonb_array_elements(saf.disk_usage) d WHERE (d->>'pct')::int >= 80
|
|
) AND saf.id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)
|
|
""")).fetchall()}
|
|
audits = [a for a in audits if a.id in ids]
|
|
elif filtre == "uptime":
|
|
audits = [a for a in audits if a.uptime and ("month" in a.uptime or "year" in a.uptime)]
|
|
elif filtre and filtre.startswith("app_"):
|
|
# Filtre applicatif generique
|
|
app_patterns = {
|
|
"app_postgres": "postgres",
|
|
"app_mariadb": "mariadb|mysqld",
|
|
"app_hana": "hdb|sapstart|HANA",
|
|
"app_oracle": "ora_pmon|oracle",
|
|
"app_httpd": "httpd",
|
|
"app_nginx": "nginx",
|
|
"app_haproxy": "haproxy",
|
|
"app_tomcat": "tomcat|catalina",
|
|
"app_nodejs": "node",
|
|
"app_redis": "redis",
|
|
"app_mongodb": "mongod",
|
|
"app_elastic": "elasticsearch|opensearch",
|
|
"app_container": "docker|podman",
|
|
"app_java": "java|\\.jar",
|
|
}
|
|
pattern = app_patterns.get(filtre, "")
|
|
if pattern:
|
|
ids = {r.id for r in db.execute(text("""
|
|
SELECT id FROM server_audit_full
|
|
WHERE status IN ('ok','partial')
|
|
AND (services::text ~* :pat OR listen_ports::text ~* :pat OR processes::text ~* :pat)
|
|
AND id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)
|
|
"""), {"pat": pattern}).fetchall()}
|
|
audits = [a for a in audits if a.id in ids]
|
|
|
|
# Filtre domaine ou zone
|
|
if domain:
|
|
# D'abord chercher comme zone
|
|
zone_servers = {r.hostname for r in db.execute(text("""
|
|
SELECT s.hostname FROM servers s
|
|
JOIN zones z ON s.zone_id = z.id
|
|
WHERE z.name = :name
|
|
"""), {"name": domain}).fetchall()}
|
|
if zone_servers:
|
|
audits = [a for a in audits if a.hostname in zone_servers]
|
|
else:
|
|
# Sinon chercher comme domaine
|
|
domain_servers = {r.hostname for r in db.execute(text("""
|
|
SELECT s.hostname FROM servers s
|
|
JOIN domain_environments de ON s.domain_env_id = de.id
|
|
JOIN domains d ON de.domain_id = d.id
|
|
WHERE d.code = :dc
|
|
"""), {"dc": domain}).fetchall()}
|
|
audits = [a for a in audits if a.hostname in domain_servers]
|
|
|
|
# Recherche hostname
|
|
if search:
|
|
q = search.lower()
|
|
audits = [a for a in audits if q in a.hostname.lower()]
|
|
|
|
# Tri
|
|
sort = request.query_params.get("sort", "hostname")
|
|
sort_dir = request.query_params.get("dir", "asc")
|
|
if sort == "hostname":
|
|
audits.sort(key=lambda a: a.hostname.lower(), reverse=(sort_dir == "desc"))
|
|
elif sort == "uptime":
|
|
def uptime_days(a):
|
|
u = a.uptime or ""
|
|
d = 0
|
|
import re as _re
|
|
m = _re.search(r"(\d+) year", u)
|
|
if m: d += int(m.group(1)) * 365
|
|
m = _re.search(r"(\d+) month", u)
|
|
if m: d += int(m.group(1)) * 30
|
|
m = _re.search(r"(\d+) week", u)
|
|
if m: d += int(m.group(1)) * 7
|
|
m = _re.search(r"(\d+) day", u)
|
|
if m: d += int(m.group(1))
|
|
return d
|
|
audits.sort(key=uptime_days, reverse=(sort_dir == "desc"))
|
|
elif sort == "reboot":
|
|
audits.sort(key=lambda a: (1 if a.reboot_required else 0), reverse=(sort_dir == "desc"))
|
|
elif sort == "patch":
|
|
def patch_sort_key(a):
|
|
if a.last_patch_date:
|
|
return a.last_patch_date
|
|
elif a.last_patch_year and a.last_patch_week:
|
|
return f"{a.last_patch_year}-{a.last_patch_week}"
|
|
return ""
|
|
audits.sort(key=patch_sort_key, reverse=(sort_dir == "desc"))
|
|
|
|
# Pagination
|
|
total_filtered = len(audits)
|
|
total_pages = max(1, (total_filtered + per_page - 1) // per_page)
|
|
page = max(1, min(page, total_pages))
|
|
audits_page = audits[(page - 1) * per_page : page * per_page]
|
|
|
|
ctx = base_context(request, db, user)
|
|
ctx.update({
|
|
"app_name": APP_NAME, "audits": audits_page, "kpis": kpis,
|
|
"filter": filtre, "search": search, "domain": domain,
|
|
"all_domains": all_domains, "all_zones": all_zones,
|
|
"sort": sort, "sort_dir": sort_dir,
|
|
"page": page, "total_pages": total_pages, "total_filtered": total_filtered,
|
|
"msg": request.query_params.get("msg"),
|
|
})
|
|
return templates.TemplateResponse("audit_full_list.html", ctx)
|
|
|
|
|
|
@router.post("/audit-full/import")
|
|
async def audit_full_import(request: Request, db=Depends(get_db),
|
|
file: UploadFile = File(...)):
|
|
user = get_current_user(request)
|
|
if not user:
|
|
return RedirectResponse(url="/login")
|
|
|
|
try:
|
|
content = await file.read()
|
|
json_data = json.loads(content.decode("utf-8-sig"))
|
|
imported, errors = import_json_report(db, json_data)
|
|
return RedirectResponse(
|
|
url=f"/audit-full?msg=imported_{imported}_{errors}",
|
|
status_code=303,
|
|
)
|
|
except Exception as e:
|
|
return RedirectResponse(
|
|
url=f"/audit-full?msg=error_{str(e)[:50]}",
|
|
status_code=303,
|
|
)
|
|
|
|
|
|
@router.get("/audit-full/patching", response_class=HTMLResponse)
|
|
async def audit_full_patching(request: Request, db=Depends(get_db)):
|
|
user = get_current_user(request)
|
|
if not user:
|
|
return RedirectResponse(url="/login")
|
|
|
|
year = int(request.query_params.get("year", "2026"))
|
|
search = request.query_params.get("q", "").strip()
|
|
domain = request.query_params.get("domain", "")
|
|
scope = request.query_params.get("scope", "") # secops, other, ou vide=tout
|
|
page = int(request.query_params.get("page", "1"))
|
|
sort = request.query_params.get("sort", "hostname")
|
|
sort_dir = request.query_params.get("dir", "asc")
|
|
per_page = 30
|
|
|
|
yr_count = "patch_count_2026" if year == 2026 else "patch_count_2025"
|
|
yr_weeks = "patch_weeks_2026" if year == 2026 else "patch_weeks_2025"
|
|
|
|
# KPIs globaux + secops/autre
|
|
_latest = "id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)"
|
|
kpis = db.execute(text(
|
|
f"SELECT COUNT(*) as total,"
|
|
f" COUNT(*) FILTER (WHERE {yr_count} >= 1) as patched,"
|
|
f" COUNT(*) FILTER (WHERE {yr_count} = 1) as once,"
|
|
f" COUNT(*) FILTER (WHERE {yr_count} >= 2) as twice,"
|
|
f" COUNT(*) FILTER (WHERE {yr_count} >= 3) as thrice,"
|
|
f" COUNT(*) FILTER (WHERE {yr_count} = 0 OR {yr_count} IS NULL) as never"
|
|
f" FROM server_audit_full WHERE status IN ('ok','partial') AND {_latest}"
|
|
)).fetchone()
|
|
|
|
kpis_secops = db.execute(text(
|
|
f"SELECT COUNT(*) as total,"
|
|
f" COUNT(*) FILTER (WHERE saf.{yr_count} >= 1) as patched,"
|
|
f" COUNT(*) FILTER (WHERE saf.{yr_count} = 0 OR saf.{yr_count} IS NULL) as never"
|
|
f" FROM server_audit_full saf JOIN servers s ON saf.server_id = s.id"
|
|
f" WHERE saf.status IN ('ok','partial') AND s.patch_os_owner = 'secops'"
|
|
f" AND saf.{_latest}"
|
|
)).fetchone()
|
|
|
|
kpis_other = db.execute(text(
|
|
f"SELECT COUNT(*) as total,"
|
|
f" COUNT(*) FILTER (WHERE saf.{yr_count} >= 1) as patched,"
|
|
f" COUNT(*) FILTER (WHERE saf.{yr_count} = 0 OR saf.{yr_count} IS NULL) as never"
|
|
f" FROM server_audit_full saf JOIN servers s ON saf.server_id = s.id"
|
|
f" WHERE saf.status IN ('ok','partial') AND (s.patch_os_owner != 'secops' OR s.patch_os_owner IS NULL)"
|
|
f" AND saf.{_latest}"
|
|
)).fetchone()
|
|
|
|
# Comparaison Y-1 a meme semaine
|
|
compare = None
|
|
from datetime import datetime as _dt
|
|
current_week = _dt.now().isocalendar()[1]
|
|
if year == 2026:
|
|
# Cumulatif 2025 a la meme semaine (pre-calcule)
|
|
import json as _json, os as _os
|
|
cumul_2025_path = _os.path.join(_os.path.dirname(_os.path.abspath(__file__)), "..", "data", "cumul_2025_by_week.json")
|
|
prev_at_same_week = 0
|
|
prev_total = 1045
|
|
prev_data_ok = False
|
|
try:
|
|
with open(cumul_2025_path) as f:
|
|
cumul_2025 = _json.load(f)
|
|
prev_at_same_week = cumul_2025.get(str(current_week - 1), cumul_2025.get(str(current_week), 0))
|
|
prev_data_ok = True
|
|
except Exception:
|
|
pass
|
|
|
|
compare = db.execute(text(
|
|
f"SELECT"
|
|
f" COUNT(*) FILTER (WHERE patch_count_2026 >= 1) as current_patched,"
|
|
f" COUNT(*) FILTER (WHERE patch_count_2025 >= 1) as prev_year_total,"
|
|
f" COUNT(*) as total"
|
|
f" FROM server_audit_full WHERE status IN ('ok','partial') AND {_latest}"
|
|
)).fetchone()
|
|
compare = {
|
|
"current_patched": compare.current_patched,
|
|
"current_total": compare.total,
|
|
"prev_year_total": compare.prev_year_total,
|
|
"prev_at_same_week": prev_at_same_week,
|
|
"prev_total": prev_total,
|
|
"prev_data_ok": prev_data_ok,
|
|
"compare_week": current_week - 1,
|
|
}
|
|
|
|
patch_by_domain = db.execute(text(
|
|
f"SELECT d.name as domain, d.code,"
|
|
f" COUNT(DISTINCT saf.hostname) as total,"
|
|
f" COUNT(DISTINCT saf.hostname) FILTER (WHERE saf.{yr_count} >= 1) as patched,"
|
|
f" COUNT(DISTINCT saf.hostname) FILTER (WHERE saf.{yr_count} >= 2) as twice,"
|
|
f" COUNT(DISTINCT saf.hostname) FILTER (WHERE saf.{yr_count} = 0 OR saf.{yr_count} IS NULL) as never"
|
|
f" FROM server_audit_full saf JOIN servers s ON saf.server_id = s.id"
|
|
f" JOIN domain_environments de ON s.domain_env_id = de.id JOIN domains d ON de.domain_id = d.id"
|
|
f" WHERE saf.status IN ('ok','partial')"
|
|
f" AND saf.id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)"
|
|
f" GROUP BY d.name, d.code, d.display_order ORDER BY d.display_order"
|
|
)).fetchall()
|
|
|
|
patch_weekly = []
|
|
if year == 2026:
|
|
patch_weekly = db.execute(text("""
|
|
SELECT week, SUM(patched)::int as patched, SUM(cancelled)::int as cancelled FROM (
|
|
SELECT unnest(string_to_array(patch_weeks_2026, ',')) as week, 1 as patched, 0 as cancelled
|
|
FROM server_audit_full
|
|
WHERE status IN ('ok','partial') AND patch_weeks_2026 IS NOT NULL AND patch_weeks_2026 != ''
|
|
AND id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)
|
|
UNION ALL
|
|
SELECT unnest(string_to_array(cancelled_weeks_2026, ',')) as week, 0 as patched, 1 as cancelled
|
|
FROM server_audit_full
|
|
WHERE status IN ('ok','partial') AND cancelled_weeks_2026 IS NOT NULL AND cancelled_weeks_2026 != ''
|
|
AND id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)
|
|
) combined WHERE week != '' GROUP BY week ORDER BY week
|
|
""")).fetchall()
|
|
|
|
all_domains = db.execute(text("SELECT code, name, 'domain' as type FROM domains ORDER BY name")).fetchall()
|
|
all_zones = db.execute(text("SELECT name as code, name, 'zone' as type FROM zones ORDER BY name")).fetchall()
|
|
|
|
servers = db.execute(text(
|
|
f"SELECT DISTINCT ON (saf.hostname) saf.id, saf.hostname, saf.os_release,"
|
|
f" saf.last_patch_date, saf.last_patch_week, saf.last_patch_year,"
|
|
f" saf.{yr_count} as patch_count, saf.{yr_weeks} as patch_weeks,"
|
|
f" d.name as domain, e.name as env, z.name as zone"
|
|
f" FROM server_audit_full saf"
|
|
f" LEFT JOIN servers s ON saf.server_id = s.id"
|
|
f" LEFT JOIN domain_environments de ON s.domain_env_id = de.id"
|
|
f" LEFT JOIN domains d ON de.domain_id = d.id"
|
|
f" LEFT JOIN environments e ON de.environment_id = e.id"
|
|
f" LEFT JOIN zones z ON s.zone_id = z.id"
|
|
f" WHERE saf.status IN ('ok','partial')"
|
|
f" ORDER BY saf.hostname, saf.audit_date DESC"
|
|
)).fetchall()
|
|
|
|
if domain:
|
|
zone_hosts = {r.hostname for r in db.execute(text(
|
|
"SELECT s.hostname FROM servers s JOIN zones z ON s.zone_id = z.id WHERE z.name = :n"
|
|
), {"n": domain}).fetchall()}
|
|
if zone_hosts:
|
|
servers = [s for s in servers if s.hostname in zone_hosts]
|
|
else:
|
|
dom_hosts = {r.hostname for r in db.execute(text(
|
|
"SELECT s.hostname FROM servers s JOIN domain_environments de ON s.domain_env_id = de.id"
|
|
" JOIN domains d ON de.domain_id = d.id WHERE d.code = :dc"
|
|
), {"dc": domain}).fetchall()}
|
|
servers = [s for s in servers if s.hostname in dom_hosts]
|
|
if search:
|
|
servers = [s for s in servers if search.lower() in s.hostname.lower()]
|
|
|
|
# Filtre scope secops / autre
|
|
if scope == "secops":
|
|
secops_hosts = {r.hostname for r in db.execute(text(
|
|
"SELECT hostname FROM servers WHERE patch_os_owner = 'secops'"
|
|
)).fetchall()}
|
|
servers = [s for s in servers if s.hostname in secops_hosts]
|
|
elif scope == "other":
|
|
secops_hosts = {r.hostname for r in db.execute(text(
|
|
"SELECT hostname FROM servers WHERE patch_os_owner = 'secops'"
|
|
)).fetchall()}
|
|
servers = [s for s in servers if s.hostname not in secops_hosts]
|
|
|
|
if sort == "hostname":
|
|
servers.sort(key=lambda s: s.hostname.lower(), reverse=(sort_dir == "desc"))
|
|
elif sort == "count":
|
|
servers.sort(key=lambda s: s.patch_count or 0, reverse=(sort_dir == "desc"))
|
|
elif sort == "last":
|
|
servers.sort(key=lambda s: s.last_patch_week or "", reverse=(sort_dir == "desc"))
|
|
|
|
total_filtered = len(servers)
|
|
total_pages = max(1, (total_filtered + per_page - 1) // per_page)
|
|
page = max(1, min(page, total_pages))
|
|
servers_page = servers[(page - 1) * per_page : page * per_page]
|
|
|
|
ctx = base_context(request, db, user)
|
|
ctx.update({
|
|
"app_name": APP_NAME, "year": year, "kpis": kpis,
|
|
"kpis_secops": kpis_secops, "kpis_other": kpis_other,
|
|
"compare": compare,
|
|
"patch_by_domain": patch_by_domain, "patch_weekly": patch_weekly,
|
|
"servers": servers_page, "all_domains": all_domains, "all_zones": all_zones,
|
|
"search": search, "domain": domain, "scope": scope,
|
|
"sort": sort, "sort_dir": sort_dir,
|
|
"page": page, "total_pages": total_pages, "total_filtered": total_filtered,
|
|
})
|
|
return templates.TemplateResponse("audit_full_patching.html", ctx)
|
|
|
|
|
|
@router.get("/audit-full/export-csv")
|
|
async def audit_full_export_csv(request: Request, db=Depends(get_db)):
|
|
user = get_current_user(request)
|
|
if not user:
|
|
return RedirectResponse(url="/login")
|
|
|
|
import io, csv
|
|
filtre = request.query_params.get("filter", "")
|
|
search = request.query_params.get("q", "").strip()
|
|
domain = request.query_params.get("domain", "")
|
|
|
|
audits = get_latest_audits(db, limit=9999)
|
|
|
|
# Memes filtres que la page liste
|
|
if filtre == "reboot":
|
|
audits = [a for a in audits if a.reboot_required]
|
|
elif filtre == "uptime":
|
|
audits = [a for a in audits if a.uptime and ("month" in a.uptime or "year" in a.uptime)]
|
|
elif filtre and filtre.startswith("app_"):
|
|
app_patterns = {
|
|
"app_postgres": "postgres", "app_mariadb": "mariadb|mysqld",
|
|
"app_hana": "hdb|sapstart|HANA", "app_oracle": "ora_pmon|oracle",
|
|
"app_httpd": "httpd", "app_nginx": "nginx", "app_haproxy": "haproxy",
|
|
"app_tomcat": "tomcat|catalina", "app_nodejs": "node",
|
|
"app_redis": "redis", "app_mongodb": "mongod",
|
|
"app_elastic": "elasticsearch|opensearch", "app_container": "docker|podman",
|
|
"app_java": "java|\\.jar",
|
|
}
|
|
pattern = app_patterns.get(filtre, "")
|
|
if pattern:
|
|
ids = {r.id for r in db.execute(text("""
|
|
SELECT id FROM server_audit_full
|
|
WHERE status IN ('ok','partial')
|
|
AND (services::text ~* :pat OR listen_ports::text ~* :pat OR processes::text ~* :pat)
|
|
AND id IN (SELECT DISTINCT ON (hostname) id FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC)
|
|
"""), {"pat": pattern}).fetchall()}
|
|
audits = [a for a in audits if a.id in ids]
|
|
if domain:
|
|
zone_servers = {r.hostname for r in db.execute(text(
|
|
"SELECT s.hostname FROM servers s JOIN zones z ON s.zone_id = z.id WHERE z.name = :name"
|
|
), {"name": domain}).fetchall()}
|
|
if zone_servers:
|
|
audits = [a for a in audits if a.hostname in zone_servers]
|
|
else:
|
|
domain_servers = {r.hostname for r in db.execute(text("""
|
|
SELECT s.hostname FROM servers s JOIN domain_environments de ON s.domain_env_id = de.id
|
|
JOIN domains d ON de.domain_id = d.id WHERE d.code = :dc
|
|
"""), {"dc": domain}).fetchall()}
|
|
audits = [a for a in audits if a.hostname in domain_servers]
|
|
if search:
|
|
q = search.lower()
|
|
audits = [a for a in audits if q in a.hostname.lower()]
|
|
|
|
# Generer CSV
|
|
output = io.StringIO()
|
|
writer = csv.writer(output, delimiter=";")
|
|
writer.writerow(["Hostname", "OS", "Kernel", "Uptime", "Services", "Processus",
|
|
"Ports", "Connexions", "Reboot requis", "Date audit"])
|
|
for a in audits:
|
|
writer.writerow([
|
|
a.hostname, a.os_release or "", a.kernel or "", a.uptime or "",
|
|
a.svc_count, a.proc_count, a.port_count, a.conn_count,
|
|
"Oui" if a.reboot_required else "Non",
|
|
a.audit_date.strftime("%Y-%m-%d %H:%M") if a.audit_date else "",
|
|
])
|
|
|
|
output.seek(0)
|
|
return StreamingResponse(
|
|
iter(["\ufeff" + output.getvalue()]),
|
|
media_type="text/csv",
|
|
headers={"Content-Disposition": "attachment; filename=audit_serveurs.csv"},
|
|
)
|
|
|
|
|
|
@router.get("/audit-full/flow-map", response_class=HTMLResponse)
|
|
async def audit_full_flow_map(request: Request, db=Depends(get_db)):
|
|
user = get_current_user(request)
|
|
if not user:
|
|
return RedirectResponse(url="/login")
|
|
|
|
domain_filter = request.query_params.get("domain", "")
|
|
server_filter = request.query_params.get("server", "").strip()
|
|
|
|
# Domaines + zones pour le dropdown
|
|
all_domains = db.execute(text(
|
|
"SELECT code, name, 'domain' as type FROM domains ORDER BY name"
|
|
)).fetchall()
|
|
all_zones = db.execute(text(
|
|
"SELECT name as code, name, 'zone' as type FROM zones ORDER BY name"
|
|
)).fetchall()
|
|
|
|
# Serveurs audites pour l'autocompletion
|
|
audited_servers = db.execute(text("""
|
|
SELECT DISTINCT hostname FROM server_audit_full WHERE status IN ('ok','partial') ORDER BY hostname
|
|
""")).fetchall()
|
|
|
|
if server_filter:
|
|
# Flux pour un serveur specifique (IN + OUT)
|
|
flows = db.execute(text("""
|
|
SELECT source_hostname, source_ip, dest_ip, dest_port,
|
|
dest_hostname, process_name, direction, state,
|
|
COUNT(*) as cnt
|
|
FROM network_flow_map nfm
|
|
JOIN server_audit_full saf ON nfm.audit_id = saf.id
|
|
WHERE saf.id IN (
|
|
SELECT DISTINCT ON (hostname) id FROM server_audit_full
|
|
WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC
|
|
)
|
|
AND (nfm.source_hostname = :srv OR nfm.dest_hostname = :srv)
|
|
AND nfm.source_hostname != nfm.dest_hostname
|
|
AND nfm.dest_hostname IS NOT NULL
|
|
GROUP BY source_hostname, source_ip, dest_ip, dest_port,
|
|
dest_hostname, process_name, direction, state
|
|
ORDER BY source_hostname
|
|
"""), {"srv": server_filter}).fetchall()
|
|
elif domain_filter:
|
|
# Flux pour un domaine ou une zone
|
|
# D'abord chercher comme zone
|
|
hostnames = [r.hostname for r in db.execute(text("""
|
|
SELECT s.hostname FROM servers s
|
|
JOIN zones z ON s.zone_id = z.id WHERE z.name = :name
|
|
"""), {"name": domain_filter}).fetchall()]
|
|
if not hostnames:
|
|
# Sinon comme domaine
|
|
hostnames = [r.hostname for r in db.execute(text("""
|
|
SELECT s.hostname FROM servers s
|
|
JOIN domain_environments de ON s.domain_env_id = de.id
|
|
JOIN domains d ON de.domain_id = d.id WHERE d.code = :dc
|
|
"""), {"dc": domain_filter}).fetchall()]
|
|
if hostnames:
|
|
flows = db.execute(text("""
|
|
SELECT source_hostname, source_ip, dest_ip, dest_port,
|
|
dest_hostname, process_name, direction, state,
|
|
COUNT(*) as cnt
|
|
FROM network_flow_map nfm
|
|
JOIN server_audit_full saf ON nfm.audit_id = saf.id
|
|
WHERE saf.id IN (
|
|
SELECT DISTINCT ON (hostname) id FROM server_audit_full
|
|
WHERE status IN ('ok','partial') ORDER BY hostname, audit_date DESC
|
|
)
|
|
AND (nfm.source_hostname = ANY(:hosts) OR nfm.dest_hostname = ANY(:hosts))
|
|
AND nfm.source_hostname != COALESCE(nfm.dest_hostname, '')
|
|
AND nfm.dest_hostname IS NOT NULL
|
|
GROUP BY source_hostname, source_ip, dest_ip, dest_port,
|
|
dest_hostname, process_name, direction, state
|
|
ORDER BY source_hostname
|
|
"""), {"hosts": hostnames}).fetchall()
|
|
else:
|
|
flows = []
|
|
else:
|
|
flows = get_flow_map(db)
|
|
|
|
app_map = get_app_map(db)
|
|
|
|
ctx = base_context(request, db, user)
|
|
ctx.update({
|
|
"app_name": APP_NAME, "flows": flows, "app_map": app_map,
|
|
"all_domains": all_domains, "all_zones": all_zones,
|
|
"audited_servers": audited_servers,
|
|
"domain_filter": domain_filter, "server_filter": server_filter,
|
|
})
|
|
return templates.TemplateResponse("audit_full_flowmap.html", ctx)
|
|
|
|
|
|
@router.get("/audit-full/{audit_id}", response_class=HTMLResponse)
|
|
async def audit_full_detail(request: Request, audit_id: int, db=Depends(get_db)):
|
|
user = get_current_user(request)
|
|
if not user:
|
|
return RedirectResponse(url="/login")
|
|
|
|
audit = get_audit_detail(db, audit_id)
|
|
if not audit:
|
|
return RedirectResponse(url="/audit-full")
|
|
|
|
def _j(val, default):
|
|
if val is None: return default
|
|
if isinstance(val, (list, dict)): return val
|
|
try: return json.loads(val)
|
|
except: return default
|
|
|
|
# Serveur partial (pas encore audite via SSH)
|
|
is_partial = (audit.status == "partial")
|
|
|
|
flows = [] if is_partial else get_flow_map_for_server(db, audit.hostname)
|
|
|
|
ctx = base_context(request, db, user)
|
|
ctx.update({
|
|
"app_name": APP_NAME, "a": audit, "flows": flows,
|
|
"is_partial": is_partial,
|
|
"services": _j(audit.services, []),
|
|
"processes": _j(audit.processes, []),
|
|
"listen_ports": _j(audit.listen_ports, []),
|
|
"connections": _j(audit.connections, []),
|
|
"flux_in": _j(audit.flux_in, []),
|
|
"flux_out": _j(audit.flux_out, []),
|
|
"disk_usage": _j(audit.disk_usage, []),
|
|
"interfaces": _j(audit.interfaces, []),
|
|
"correlation": _j(audit.correlation_matrix, []),
|
|
"outbound": _j(audit.outbound_only, []),
|
|
"firewall": _j(audit.firewall, {}),
|
|
"conn_wait": _j(audit.conn_wait, []),
|
|
"traffic": _j(audit.traffic, []),
|
|
})
|
|
return templates.TemplateResponse("audit_full_detail.html", ctx)
|