Add import_sanef_databases: table server_databases + import CSV iTop
Crée server_databases (FK servers.id, UNIQUE hostname+instance_name). Pour chaque instance BDD iTop : match hostname via Serveur->Nom, stocke type (SAP Hana/Oracle/PostgreSQL/SQL Server/MySQL), version, édition, cluster, environnement, état.
This commit is contained in:
parent
2379a2fdc0
commit
7c56725f6e
144
tools/import_sanef_databases.py
Normal file
144
tools/import_sanef_databases.py
Normal file
@ -0,0 +1,144 @@
|
||||
"""Import iTop 'Instance de base de donnees' -> table server_databases.
|
||||
|
||||
Cree la table si absente. Pour chaque ligne CSV, match sur Serveur->Nom
|
||||
(hostname) puis insert un enregistrement instance BDD avec:
|
||||
- instance_name (Nom)
|
||||
- db_type (Logiciel->Nom: SAP Hana, Oracle, PostgreSQL, SQL Server, MySQL...)
|
||||
- db_version (Database_version / Logiciel->Version)
|
||||
- db_edition (Database édition)
|
||||
- cluster_name (Cluster id->Nom)
|
||||
- environnement / etat iTop
|
||||
- description
|
||||
|
||||
Usage:
|
||||
python tools/import_sanef_databases.py <csv> [--truncate] [--dry-run]
|
||||
"""
|
||||
import os
|
||||
import csv
|
||||
import argparse
|
||||
from sqlalchemy import create_engine, text
|
||||
|
||||
DATABASE_URL = os.getenv("DATABASE_URL_DEMO") or os.getenv("DATABASE_URL") \
|
||||
or "postgresql://patchcenter:PatchCenter2026!@localhost:5432/patchcenter_demo"
|
||||
|
||||
|
||||
def clean(v):
|
||||
if v is None:
|
||||
return None
|
||||
s = str(v).strip()
|
||||
return s or None
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("csv_path")
|
||||
parser.add_argument("--truncate", action="store_true",
|
||||
help="Vide server_databases avant import")
|
||||
parser.add_argument("--dry-run", action="store_true")
|
||||
args = parser.parse_args()
|
||||
|
||||
engine = create_engine(DATABASE_URL)
|
||||
print(f"[INFO] DB: {DATABASE_URL.split('@')[-1]}")
|
||||
print(f"[INFO] CSV: {args.csv_path}")
|
||||
conn = engine.connect().execution_options(isolation_level="AUTOCOMMIT")
|
||||
|
||||
# Cree la table si absente
|
||||
conn.execute(text("""
|
||||
CREATE TABLE IF NOT EXISTS server_databases (
|
||||
id serial PRIMARY KEY,
|
||||
server_id integer REFERENCES servers(id) ON DELETE CASCADE,
|
||||
hostname citext,
|
||||
instance_name varchar(100),
|
||||
db_type varchar(50),
|
||||
db_version varchar(100),
|
||||
db_edition varchar(50),
|
||||
cluster_name varchar(100),
|
||||
environnement varchar(50),
|
||||
etat varchar(30),
|
||||
description text,
|
||||
created_at timestamptz DEFAULT now(),
|
||||
UNIQUE (hostname, instance_name)
|
||||
)
|
||||
"""))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_srv_db_server ON server_databases(server_id)"))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_srv_db_type ON server_databases(db_type)"))
|
||||
|
||||
if args.truncate and not args.dry_run:
|
||||
conn.execute(text("TRUNCATE server_databases RESTART IDENTITY CASCADE"))
|
||||
print("[INFO] Table vidée")
|
||||
|
||||
with open(args.csv_path, "r", encoding="utf-8-sig", newline="") as f:
|
||||
sample = f.read(4096); f.seek(0)
|
||||
delim = ";" if sample.count(";") > sample.count(",") else ","
|
||||
rows = list(csv.DictReader(f, delimiter=delim))
|
||||
print(f"[INFO] {len(rows)} lignes (delim={delim!r})")
|
||||
|
||||
stats = {"inserted": 0, "updated": 0, "no_server": 0, "skipped": 0}
|
||||
|
||||
for r in rows:
|
||||
hostname = clean(r.get("Serveur->Nom") or "")
|
||||
if hostname:
|
||||
hostname = hostname.split(".")[0].lower()
|
||||
instance = clean(r.get("Nom"))
|
||||
if not hostname or not instance:
|
||||
stats["skipped"] += 1
|
||||
continue
|
||||
|
||||
db_type = clean(r.get("Logiciel->Nom") or r.get("Type"))
|
||||
db_version = clean(r.get("Database_version") or r.get("Logiciel->Version")
|
||||
or r.get("Version complète avec niveau de patch"))
|
||||
db_edition = clean(r.get("Database édition"))
|
||||
cluster = clean(r.get("Cluster id->Nom"))
|
||||
env = clean(r.get("Environnement"))
|
||||
etat = clean(r.get("Etat"))
|
||||
desc = clean(r.get("Description"))
|
||||
|
||||
if db_version:
|
||||
db_version = db_version[:100]
|
||||
if db_type:
|
||||
db_type = db_type[:50]
|
||||
|
||||
# Retrouve server_id
|
||||
srv = conn.execute(text("SELECT id FROM servers WHERE hostname=:h"),
|
||||
{"h": hostname}).fetchone()
|
||||
sid = srv.id if srv else None
|
||||
if not sid:
|
||||
stats["no_server"] += 1
|
||||
|
||||
if args.dry_run:
|
||||
print(f" DRY: {hostname:20s} {instance:20s} [{db_type or '?'}]")
|
||||
continue
|
||||
|
||||
# Upsert via ON CONFLICT (hostname, instance_name)
|
||||
try:
|
||||
conn.execute(text("""
|
||||
INSERT INTO server_databases
|
||||
(server_id, hostname, instance_name, db_type, db_version,
|
||||
db_edition, cluster_name, environnement, etat, description)
|
||||
VALUES (:sid, :h, :inst, :type, :ver, :ed, :cl, :env, :etat, :desc)
|
||||
ON CONFLICT (hostname, instance_name) DO UPDATE SET
|
||||
server_id = EXCLUDED.server_id,
|
||||
db_type = EXCLUDED.db_type,
|
||||
db_version = EXCLUDED.db_version,
|
||||
db_edition = EXCLUDED.db_edition,
|
||||
cluster_name = EXCLUDED.cluster_name,
|
||||
environnement = EXCLUDED.environnement,
|
||||
etat = EXCLUDED.etat,
|
||||
description = EXCLUDED.description
|
||||
"""), {
|
||||
"sid": sid, "h": hostname, "inst": instance,
|
||||
"type": db_type, "ver": db_version, "ed": db_edition,
|
||||
"cl": cluster, "env": env, "etat": etat, "desc": desc,
|
||||
})
|
||||
stats["inserted"] += 1
|
||||
except Exception as e:
|
||||
print(f" [ERR] {hostname}/{instance}: {str(e)[:150]}")
|
||||
stats["skipped"] += 1
|
||||
|
||||
conn.close()
|
||||
print(f"\n[DONE] Inserts/upserts: {stats['inserted']} | "
|
||||
f"Sans serveur en base: {stats['no_server']} | Skip: {stats['skipped']}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
Loading…
Reference in New Issue
Block a user