myhomelab/src/neuro/tools/llm_agent/main.py
neo.webmaster.2@gmail.com 7e0d9b68be
Some checks are pending
Python Code Quality / lock_file (push) Waiting to run
Python Code Quality / linting (push) Blocked by required conditions
Python Code Quality / formatting (push) Blocked by required conditions
Python Code Quality / type_consistency (push) Blocked by required conditions
Python Code Quality / tests (push) Blocked by required conditions
Python Code Quality / build (push) Blocked by required conditions
"Auto-commit via make git"
2026-03-05 08:11:11 +01:00

49 lines
1.0 KiB
Python

# src/neuro/tools/llm_agent/main.py
from __future__ import annotations
import shutil
import subprocess
import threading
import time
def _print(msg: str):
print(f"[llm] {msg}", flush=True)
def ensure_ollama() -> None:
    """Install Ollama via its official install script when the CLI is absent.

    If the ``ollama`` binary is already on PATH this is a no-op.
    Raises ``subprocess.CalledProcessError`` if the installer exits non-zero.
    """
    if shutil.which("ollama") is not None:
        _print("Ollama encontrado.")
        return

    _print("Ollama não encontrado. A instalar...")
    # NOTE(review): piping curl into sh executes remote code; this mirrors the
    # vendor-documented install flow — confirm it is acceptable in this context.
    install_script = "curl -fsSL https://ollama.com/install.sh | sh"
    subprocess.run(["bash", "-c", install_script], check=True)
def _ollama_running():
try:
subprocess.run(
["ollama", "list"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
timeout=2,
)
return True
except Exception:
return False
def ensure_ollama_serve() -> None:
    """Start ``ollama serve`` in the background if it is not already running.

    IMPROVEMENT: the original spawned a daemon thread whose only job was to
    block on ``subprocess.run(["ollama", "serve"])``. ``subprocess.Popen``
    launches the server as a detached background child directly, without the
    throwaway thread; the observable effect (a running child process and a
    short startup grace period) is the same.
    """
    if _ollama_running():
        _print("Ollama serve já ativo.")
        return

    _print("A iniciar ollama serve...")
    # Fire-and-forget: the child outlives this function; no handle is kept.
    subprocess.Popen(["ollama", "serve"])
    # Crude readiness wait to let the server bind its port.
    # NOTE(review): consider polling _ollama_running() in a loop instead of
    # a fixed 2-second sleep.
    time.sleep(2)