Initial implementation of IRC LLM bot

Full implementation from spec: ZNC/IRC client with TLS, Ollama LLM backend,
per-user SQLite conversation memory, and Flask web admin portal with 7 pages.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
tocmo0nlord
2026-04-17 22:08:53 -04:00
commit b154f63cfa
25 changed files with 2916 additions and 0 deletions

0
bot/__init__.py Normal file
View File

372
bot/irc_client.py Normal file
View File

@@ -0,0 +1,372 @@
"""
IRC bot entry point — connects to ZNC via TLS, handles the message loop,
reconnect backoff, config reload (SIGHUP + Unix socket), and PID file.
"""
import json
import logging
import logging.handlers
import os
import re
import signal
import socket
import ssl
import sys
import threading
import time
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
# ── Logging ────────────────────────────────────────────────────────────────
# Runtime directories must exist before the log handler, PID file, and
# config file below are opened.
os.makedirs("logs", exist_ok=True)
os.makedirs("data", exist_ok=True)
os.makedirs("config", exist_ok=True)
# Rotating file log: 5 MB per file, 3 rotated backups kept.
handler = logging.handlers.RotatingFileHandler(
    "logs/bot.log", maxBytes=5 * 1024 * 1024, backupCount=3, encoding="utf-8"
)
handler.setFormatter(
    logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
)
# Log to both the rotating file and stdout (stdout is useful under Docker).
logging.basicConfig(
    level=logging.INFO,
    handlers=[handler, logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger(__name__)
# Imported after logging is configured so these modules' loggers inherit it.
from bot import memory as mem
from bot.message_handler import handle_privmsg
# ── Config ─────────────────────────────────────────────────────────────────
CONFIG_PATH = "config/config.json"  # JSON overrides merged over defaults
PID_PATH = "data/ircbot.pid"        # written by _write_pid() at startup
SOCK_PATH = "data/ircbot.sock"      # Unix control socket (RELOAD / RECONNECT)
_config: dict = {}                  # current merged config; guarded by _config_lock
_config_lock = threading.Lock()
# Runtime state
_sock: socket.socket | None = None  # active IRC socket, None while disconnected
_connected = False
_session_msg_count = 0              # PRIVMSGs handled since process start
_status = "disconnected"  # disconnected | connecting | connected | reconnecting
def _load_config() -> dict:
    """Build the effective config: built-in defaults overlaid with config.json.

    Returns a fresh dict on every call; a broken or missing config.json
    simply leaves the defaults in place (logged, never fatal).
    """
    cfg = {
        "channels": [],
        "trigger_on_nick": True,
        "trigger_prefix": None,
        "ignored_nicks": ["ChanServ", "NickServ"],
        "bot_nick": os.getenv("BOT_NICK", "avcbot"),
        "system_prompt": "You are a helpful IRC assistant for Active Blue. Keep responses concise and under 3 sentences when possible.",
        "max_response_length": 400,
        "ollama_host": os.getenv("OLLAMA_HOST", "192.168.2.10"),
        "ollama_port": int(os.getenv("OLLAMA_PORT", 11434)),
        "ollama_model": os.getenv("OLLAMA_MODEL", "llama3.1"),
        "ollama_temperature": 0.7,
        "ollama_num_predict": 120,
        "ollama_num_ctx": 2048,
        "response_timeout_seconds": 30,
        "context_window": 5,
        "memory_enabled": True,
        "memory_history_limit": 8,
        "memory_max_age_days": 90,
        "log_level": "INFO",
    }
    if os.path.exists(CONFIG_PATH):
        try:
            with open(CONFIG_PATH, "r") as fh:
                overrides = json.load(fh)
        except Exception as exc:
            logger.error(f"[CONFIG] Failed to load config.json: {exc}")
        else:
            cfg.update(overrides)
            logger.info("[CONFIG] Loaded config.json")
    return cfg
def _reload_config() -> None:
    """Re-read config from disk, swap it in under the lock, and apply log level."""
    global _config
    fresh = _load_config()
    with _config_lock:
        _config = fresh
    # Root logger level follows the (possibly changed) config value.
    logging.getLogger().setLevel(
        logging.getLevelName(_config.get("log_level", "INFO"))
    )
    logger.info("[CONFIG] Reloaded")
def get_config() -> dict:
    """Return a shallow copy of the current config (thread-safe snapshot)."""
    with _config_lock:
        snapshot = dict(_config)
    return snapshot
# ── PID + Unix socket ──────────────────────────────────────────────────────
def _write_pid() -> None:
    """Record this process's PID so the web portal can find/signal the bot."""
    Path(PID_PATH).write_text(str(os.getpid()))
def _remove_pid() -> None:
    """Delete the PID file; a missing file is not an error."""
    try:
        os.remove(PID_PATH)
    except FileNotFoundError:
        pass
def _start_sock_listener() -> None:
    """Listens for RELOAD / RECONNECT commands on a Unix socket.

    Used by the web portal (when both run in Docker) to poke the bot without
    signals. Runs a daemon thread; any setup failure is logged and ignored so
    the bot still works without the control socket.
    """
    if sys.platform == "win32":
        return  # Unix sockets not supported on Windows
    try:
        # Remove a stale socket file left over from a previous run.
        if os.path.exists(SOCK_PATH):
            os.remove(SOCK_PATH)
        srv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        srv.bind(SOCK_PATH)
        srv.listen(5)
        # 1 s accept timeout keeps the thread from blocking indefinitely.
        srv.settimeout(1)
        # Group read/write — presumably so the portal's user can connect;
        # NOTE(review): confirm both processes share a group.
        os.chmod(SOCK_PATH, 0o660)
        logger.info(f"[CONFIG] Unix socket listening at {SOCK_PATH}")
        def _loop():
            # Daemon thread: accept one short command per connection.
            while True:
                try:
                    conn, _ = srv.accept()
                    data = conn.recv(64).decode().strip()
                    conn.close()
                    if data == "RELOAD":
                        _reload_config()
                    elif data == "RECONNECT":
                        _trigger_reconnect()
                except socket.timeout:
                    continue
                except Exception as e:
                    logger.error(f"[CONFIG] Socket error: {e}")
        t = threading.Thread(target=_loop, daemon=True)
        t.start()
    except Exception as e:
        logger.warning(f"[CONFIG] Could not start Unix socket: {e}")
# ── SIGHUP handler (non-Docker) ────────────────────────────────────────────
# Set by the control socket's RECONNECT command; polled by the message loop.
_reconnect_flag = threading.Event()
def _trigger_reconnect() -> None:
    """Ask the message loop to drop the connection and reconnect."""
    _reconnect_flag.set()
if sys.platform != "win32":
    # Outside Docker, SIGHUP reloads the config in place (no reconnect).
    signal.signal(signal.SIGHUP, lambda s, f: _reload_config())
# ── IRC helpers ────────────────────────────────────────────────────────────
def _send(sock: socket.socket, line: str) -> None:
    """Write one raw IRC line, CRLF-terminated, to the server socket."""
    logger.debug(f"IRC OUT: {line}")
    payload = f"{line}\r\n".encode("utf-8", errors="replace")
    sock.sendall(payload)
def _join_channels(sock: socket.socket, channels: list[str]) -> None:
    """Issue a JOIN for every configured channel."""
    for channel in channels:
        _send(sock, f"JOIN {channel}")
        logger.info(f"[IRC] Joining {channel}")
# ZNC buffer-playback lines start with a "[HH:MM:SS] " timestamp prefix.
PLAYBACK_RE = re.compile(r"^\[\d{2}:\d{2}:\d{2}\] ")
def _is_playback(text: str) -> bool:
    """True if *text* looks like ZNC playback rather than a live message."""
    return PLAYBACK_RE.match(text) is not None
def _parse_privmsg(line: str) -> tuple[str, str, str] | None:
"""Returns (nick, channel, text) or None."""
m = re.match(r"^:([^!]+)![^ ]+ PRIVMSG (#\S+) :(.+)$", line)
if m:
return m.group(1), m.group(2), m.group(3)
return None
# ── Connection ─────────────────────────────────────────────────────────────
def _connect() -> socket.socket:
    """Open a (optionally TLS) socket to ZNC and send the IRC registration.

    Returns the connected socket ready for the message loop; raises
    OSError / ssl.SSLError on connection failure.
    """
    global _status
    host = os.getenv("ZNC_HOST", "ham.activeblue.net")
    port = int(os.getenv("ZNC_PORT", 6501))
    use_ssl = os.getenv("ZNC_SSL", "true").lower() == "true"
    znc_user = os.getenv("ZNC_USER", "")
    znc_password = os.getenv("ZNC_PASSWORD", "")
    znc_network = os.getenv("ZNC_NETWORK", "activeblue")
    bot_nick = get_config().get("bot_nick", os.getenv("BOT_NICK", "avcbot"))
    bot_realname = os.getenv("BOT_REALNAME", "Active Blue IRC Bot")
    _status = "connecting"
    logger.info(f"[IRC] Connecting to {host}:{port} (SSL={use_ssl})")
    raw = socket.create_connection((host, port), timeout=30)
    if use_ssl:
        # ZNC commonly runs a self-signed cert, so verification is disabled.
        # NOTE(review): this accepts ANY certificate — consider cert pinning.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        sock = ctx.wrap_socket(raw, server_hostname=host)
    else:
        sock = raw
    # Fix: PASS must be sent BEFORE NICK/USER completes registration
    # (RFC 2812 §3.1.1); ZNC uses "user/network:password" as the password.
    # The original sent PASS last, which servers may reject.
    _send(sock, f"PASS {znc_user}/{znc_network}:{znc_password}")
    _send(sock, f"NICK {bot_nick}")
    _send(sock, f"USER {bot_nick} 0 * :{bot_realname}")
    return sock
# ── Main message loop ──────────────────────────────────────────────────────
def _run_loop(sock: socket.socket) -> None:
    """Blocking receive loop: split the stream into IRC lines and dispatch.

    Handles PING/PONG, 001 (welcome → join channels), 433 (nick in use),
    and routes channel PRIVMSGs to handle_privmsg. ZNC playback lines
    (IRCv3 @time tag or "[HH:MM:SS]" prefix) are buffered as channel
    context only, never sent to the LLM. Raises ConnectionResetError on
    disconnect or when a reconnect was requested, so main() can retry.
    """
    global _connected, _status, _session_msg_count
    buf = ""
    _connected = True
    _status = "connected"
    while True:
        # Reconnect requested via the control socket: bail out of the loop.
        if _reconnect_flag.is_set():
            _reconnect_flag.clear()
            raise ConnectionResetError("Reconnect triggered")
        try:
            # Short timeout so the reconnect flag is polled about once a second.
            sock.settimeout(1)
            chunk = sock.recv(4096).decode("utf-8", errors="replace")
        except socket.timeout:
            continue
        except Exception:
            raise
        if not chunk:
            raise ConnectionResetError("Remote closed connection")
        buf += chunk
        # Process every complete CRLF-terminated line; keep the remainder.
        while "\r\n" in buf:
            line, buf = buf.split("\r\n", 1)
            # Strip IRCv3 server-time tag (clientbuffer playback)
            is_tagged_playback = False
            if line.startswith("@time="):
                is_tagged_playback = True
                line = re.sub(r"^@[^ ]+ ", "", line)
            logger.debug(f"IRC IN: {line}")
            if line.startswith("PING"):
                # Echo the PING payload back as PONG to stay connected.
                _send(sock, "PONG" + line[4:])
                continue
            if " 001 " in line:  # RPL_WELCOME: registration is complete
                logger.info("[IRC] Connected — joining channels")
                cfg = get_config()
                _join_channels(sock, cfg.get("channels", []))
                continue
            if " 433 " in line:  # ERR_NICKNAMEINUSE: retry with a suffix
                bot_nick = get_config().get("bot_nick", "avcbot")
                _send(sock, f"NICK {bot_nick}_")
                logger.warning("[IRC] Nick in use, trying alternate")
                continue
            parsed = _parse_privmsg(line)
            if not parsed:
                continue
            nick, channel, text = parsed
            if is_tagged_playback or _is_playback(text):
                # Add to context buffer but don't send to LLM
                from bot.message_handler import _get_context
                cfg = get_config()
                ctx = _get_context(channel, cfg.get("context_window", 5))
                ctx.append(f"<{nick}> {text}")
                continue
            cfg = get_config()
            _session_msg_count += 1
            reply = handle_privmsg(nick, channel, text, cfg)
            if reply:
                _send(sock, f"PRIVMSG {channel} :{reply}")
                logger.info(f"IRC OUT: PRIVMSG {channel} :{reply[:80]}")
def get_status() -> dict:
    """Build the connection/status snapshot shown by the web admin portal."""
    cfg = get_config()
    env = os.getenv
    snapshot = {
        "status": _status,
        "nick": cfg.get("bot_nick", "avcbot"),
        "znc_host": env("ZNC_HOST", "ham.activeblue.net"),
        "znc_port": env("ZNC_PORT", "6501"),
        "znc_network": env("ZNC_NETWORK", "activeblue"),
        "ollama_host": cfg.get("ollama_host"),
        "ollama_port": cfg.get("ollama_port"),
        "ollama_model": cfg.get("ollama_model"),
        "channels": cfg.get("channels", []),
        "session_msg_count": _session_msg_count,
    }
    return snapshot
def send_raw(line: str) -> None:
    """Send one raw IRC line if currently connected; otherwise a silent no-op."""
    global _sock
    if not (_sock and _connected):
        return
    _send(_sock, line)
# ── Entry point ────────────────────────────────────────────────────────────
def main() -> None:
    """Run the bot: load config, prune old memory, then connect forever.

    Uses a capped backoff (5 s … 300 s) between connection attempts; the
    backoff resets after each successful connect. Never returns normally.
    """
    global _sock, _connected, _status
    _reload_config()
    cfg = get_config()
    # One-time cleanup of stale per-user conversation memory at startup.
    mem.prune_old_exchanges(cfg.get("memory_max_age_days", 90))
    _write_pid()
    _start_sock_listener()
    backoff = [5, 10, 30, 60, 120, 300]  # seconds between reconnect attempts
    attempt = 0
    while True:
        try:
            _sock = _connect()
            attempt = 0  # a successful connect resets the backoff ladder
            _run_loop(_sock)
        except (ConnectionResetError, ConnectionRefusedError, OSError) as e:
            # Expected network-level drops — log quietly and retry.
            _connected = False
            _status = "reconnecting"
            logger.warning(f"[IRC] Disconnected: {e}")
        except Exception as e:
            # Anything else is a bug: log the traceback but keep running.
            _connected = False
            _status = "reconnecting"
            logger.error(f"[IRC] Unexpected error: {e}", exc_info=True)
        finally:
            if _sock:
                try:
                    _sock.close()
                except Exception:
                    pass
                _sock = None
            delay = backoff[min(attempt, len(backoff) - 1)]
            attempt += 1
            logger.info(f"[IRC] Reconnecting in {delay}s (attempt {attempt})")
            time.sleep(delay)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: log the shutdown and drop the PID file before exiting.
        logger.info("[IRC] Shutting down")
        _remove_pid()

70
bot/llm_client.py Normal file
View File

@@ -0,0 +1,70 @@
import httpx
import logging
logger = logging.getLogger(__name__)
def generate(prompt: str, system: str, config: dict) -> str:
    """Call Ollama's /api/generate and return the (truncated) response text.

    Parameters:
        prompt: fully assembled user prompt (see build_prompt).
        system: system prompt string.
        config: bot config dict supplying host/port/model and sampling options.

    Raises:
        TimeoutError: when Ollama does not answer within the configured timeout.
        Exception: any other httpx/HTTP failure is logged and re-raised.
    """
    host = config.get("ollama_host", "192.168.2.10")
    port = config.get("ollama_port", 11434)
    model = config.get("ollama_model", "llama3.1")
    timeout = config.get("response_timeout_seconds", 30)
    num_predict = config.get("ollama_num_predict", 120)
    num_ctx = config.get("ollama_num_ctx", 2048)
    temperature = config.get("ollama_temperature", 0.7)
    max_length = config.get("max_response_length", 400)
    url = f"http://{host}:{port}/api/generate"
    payload = {
        "model": model,
        "system": system,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": temperature,
            "num_predict": num_predict,
            "num_ctx": num_ctx,
        },
    }
    logger.debug(f"[LLM] POST {url} model={model} prompt_len={len(prompt)}")
    try:
        response = httpx.post(url, json=payload, timeout=timeout)
        response.raise_for_status()
        text = response.json().get("response", "").strip()
        if len(text) > max_length:
            # Fix: the original appended an empty string here (a lost
            # ellipsis). Truncate on a word boundary and append a real
            # ellipsis, keeping the result within max_length.
            text = text[: max_length - 1].rsplit(" ", 1)[0] + "…"
        logger.debug(f"[LLM] Response ({len(text)} chars): {text[:80]}")
        return text
    except httpx.TimeoutException:
        logger.error(f"[LLM] Timeout after {timeout}s")
        raise TimeoutError(f"Ollama did not respond within {timeout}s")
    except Exception as e:
        logger.error(f"[LLM] Request failed: {e}")
        raise
def build_prompt(
    user_message: str,
    nick: str,
    persistent_history: list[dict],
    context_buffer: list[str],
) -> str:
    """Assemble the full LLM prompt: per-user history, channel context, question.

    Either section is omitted entirely when empty; the final line is always
    "<nick> asks: <message>".
    """
    sections: list[str] = []
    if persistent_history:
        history_lines = ["--- Past conversation with this user ---"]
        for exchange in persistent_history:
            history_lines.append(f"User: {exchange['user']}")
            history_lines.append(f"Assistant: {exchange['assistant']}")
        history_lines.append("--- End of past conversation ---")
        sections.extend(history_lines)
    if context_buffer:
        sections.append("--- Recent channel activity ---")
        sections.extend(context_buffer)
        sections.append("--- End of channel activity ---")
    sections.append(f"{nick} asks: {user_message}")
    return "\n".join(sections)

164
bot/memory.py Normal file
View File

@@ -0,0 +1,164 @@
import sqlite3
import os
import logging
import re
logger = logging.getLogger(__name__)
HISTORY_DIR = "data/history"
def _sanitize_channel(channel: str) -> str:
    """Map an IRC channel name to a safe directory name under HISTORY_DIR.

    Fix: the original left '/', '\\' and a bare '..' intact, letting a
    hostile channel name escape HISTORY_DIR. Path separators are now
    replaced and dot-only names are neutralized.
    """
    name = channel.lstrip("#")
    name = re.sub(r"[#&+!/\\]", "_", name)
    if name in ("", ".", ".."):
        name = "_"
    return name
def _sanitize_nick(nick: str) -> str:
    """Map a nick (untrusted network input) to a safe filename component.

    Anything outside the usual IRC nick charset (word chars plus
    -[]{}^`|) is replaced with '_' so e.g. "../x" cannot traverse out
    of the channel directory.
    """
    return re.sub(r"[^\w\-\[\]{}^`|]", "_", nick)
def _db_path(channel: str, nick: str) -> str:
    """Return the per-user SQLite path, creating the channel directory."""
    chan_dir = os.path.join(HISTORY_DIR, _sanitize_channel(channel))
    os.makedirs(chan_dir, exist_ok=True)
    return os.path.join(chan_dir, f"{_sanitize_nick(nick)}.db")
def _get_conn(path: str) -> sqlite3.Connection:
    """Open a per-user history database, creating the schema if needed.

    WAL journaling lets the web portal read while the bot writes.
    """
    conn = sqlite3.connect(path)
    conn.execute("PRAGMA journal_mode=WAL;")
    schema = """
    CREATE TABLE IF NOT EXISTS exchanges (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
        user_input TEXT NOT NULL,
        bot_reply TEXT NOT NULL
    )
    """
    conn.execute(schema)
    conn.commit()
    return conn
def load_history(channel: str, nick: str, limit: int) -> list[dict]:
    """Return up to *limit* most recent exchanges for a user, oldest first.

    Missing database or any read failure yields an empty list (logged).
    """
    path = _db_path(channel, nick)
    if not os.path.exists(path):
        return []
    try:
        conn = _get_conn(path)
        rows = conn.execute(
            "SELECT user_input, bot_reply FROM exchanges ORDER BY id DESC LIMIT ?",
            (limit,),
        ).fetchall()
        conn.close()
    except Exception as e:
        logger.error(f"[MEMORY] Failed to load history for {nick} in {channel}: {e}")
        return []
    rows.reverse()  # query fetched newest-first; callers want oldest-first
    return [{"user": user, "assistant": reply} for user, reply in rows]
def save_exchange(channel: str, nick: str, user_input: str, bot_reply: str) -> None:
    """Append one user/bot exchange to the per-user database (errors logged)."""
    path = _db_path(channel, nick)
    try:
        conn = _get_conn(path)
        with conn:  # commits on success
            conn.execute(
                "INSERT INTO exchanges (user_input, bot_reply) VALUES (?, ?)",
                (user_input, bot_reply),
            )
        conn.close()
    except Exception as e:
        logger.error(f"[MEMORY] Failed to write exchange for {nick} in {channel}: {e}")
def delete_user_history(channel: str, nick: str) -> None:
    """Remove a single user's history database if it exists."""
    path = _db_path(channel, nick)
    if not os.path.exists(path):
        return
    os.remove(path)
    logger.info(f"[MEMORY] Deleted history for {nick} in {channel}")
def delete_channel_history(channel: str) -> None:
    """Delete every per-user database under one channel's directory."""
    chan_dir = os.path.join(HISTORY_DIR, _sanitize_channel(channel))
    if not os.path.isdir(chan_dir):
        return
    for name in os.listdir(chan_dir):
        if name.endswith(".db"):
            os.remove(os.path.join(chan_dir, name))
    logger.info(f"[MEMORY] Cleared all history for {channel}")
def delete_all_history() -> None:
    """Delete every history database under HISTORY_DIR (all channels)."""
    for root, _dirs, files in os.walk(HISTORY_DIR):
        for name in files:
            if name.endswith(".db"):
                os.remove(os.path.join(root, name))
    logger.info("[MEMORY] All history deleted")
def prune_old_exchanges(max_age_days: int) -> None:
    """Delete exchanges older than *max_age_days* from every database.

    A value of zero or less disables pruning. Per-file failures are
    logged and do not stop the sweep.
    """
    if max_age_days <= 0:
        return
    cutoff = f"-{max_age_days} days"
    removed = 0
    for root, _dirs, files in os.walk(HISTORY_DIR):
        for name in files:
            if not name.endswith(".db"):
                continue
            path = os.path.join(root, name)
            try:
                conn = sqlite3.connect(path)
                conn.execute("PRAGMA journal_mode=WAL;")
                cursor = conn.execute(
                    "DELETE FROM exchanges WHERE timestamp < datetime('now', ?)",
                    (cutoff,),
                )
                removed += cursor.rowcount
                conn.commit()
                conn.close()
            except Exception as e:
                logger.error(f"[MEMORY] Pruning failed for {path}: {e}")
    if removed:
        logger.info(f"[MEMORY] Pruned {removed} old exchanges (>{max_age_days} days)")
def list_channels() -> list[str]:
    """Return the sanitized channel directory names that hold history."""
    if not os.path.isdir(HISTORY_DIR):
        return []
    entries = os.listdir(HISTORY_DIR)
    return [e for e in entries if os.path.isdir(os.path.join(HISTORY_DIR, e))]
def list_nicks(channel_dir: str) -> list[str]:
    """Return the nicks (db filenames minus '.db') stored under a channel dir."""
    path = os.path.join(HISTORY_DIR, channel_dir)
    if not os.path.isdir(path):
        return []
    return [name[:-3] for name in os.listdir(path) if name.endswith(".db")]
def get_all_exchanges(channel_dir: str, nick: str) -> list[dict]:
    """Return every stored exchange for one user, oldest first.

    Missing database or any read failure yields an empty list (logged).
    """
    path = os.path.join(HISTORY_DIR, channel_dir, f"{nick}.db")
    if not os.path.exists(path):
        return []
    try:
        conn = sqlite3.connect(path)
        rows = conn.execute(
            "SELECT id, timestamp, user_input, bot_reply FROM exchanges ORDER BY id ASC"
        ).fetchall()
        conn.close()
    except Exception as e:
        logger.error(f"[MEMORY] Failed to read all exchanges: {e}")
        return []
    return [
        {"id": rid, "timestamp": ts, "user": user, "assistant": reply}
        for rid, ts, user, reply in rows
    ]
def get_stats() -> dict:
    """Aggregate exchange count and on-disk byte size across all databases."""
    total = 0
    total_size = 0
    for root, _dirs, files in os.walk(HISTORY_DIR):
        for name in files:
            if not name.endswith(".db"):
                continue
            path = os.path.join(root, name)
            total_size += os.path.getsize(path)
            try:
                conn = sqlite3.connect(path)
                row = conn.execute("SELECT COUNT(*) FROM exchanges").fetchone()
                conn.close()
            except Exception:
                continue  # unreadable db: size counted, exchanges skipped
            if row:
                total += row[0]
    return {"total_exchanges": total, "total_size_bytes": total_size}

99
bot/message_handler.py Normal file
View File

@@ -0,0 +1,99 @@
import logging
import re
from collections import deque
from bot import memory as mem
from bot import llm_client
logger = logging.getLogger(__name__)
# Per-channel rolling context buffer: {channel: deque}
_context_buffers: dict[str, deque] = {}
def _get_context(channel: str, window: int) -> deque:
    """Return the channel's rolling context buffer, sized to *window*.

    The deque is rebuilt on every call so a changed context_window takes
    effect immediately without losing already-buffered lines.
    """
    existing = _context_buffers.get(channel)
    if existing is None:
        fresh = deque(maxlen=window)
    else:
        fresh = deque(existing, maxlen=window)
    _context_buffers[channel] = fresh
    return fresh
def handle_privmsg(nick: str, channel: str, text: str, config: dict) -> str | None:
    """
    Returns a reply string if the bot should respond, else None.
    Also maintains the context buffer as a side effect.

    Flow: ignored nicks are dropped entirely (not even buffered); a
    "botnick: forget me" command wipes the user's stored history; then
    the message triggers on "botnick:"/"botnick," or the configured
    prefix. Triggered messages are answered via the LLM with optional
    per-user memory; everything else is only buffered as channel context.
    """
    window = config.get("context_window", 5)
    ctx = _get_context(channel, window)
    ignored = [n.lower() for n in config.get("ignored_nicks", [])]
    if nick.lower() in ignored:
        # Services/bots are dropped before buffering — they never appear
        # in the channel context either.
        return None
    bot_nick = config.get("bot_nick", "avcbot").lower()
    trigger_prefix = config.get("trigger_prefix")
    trigger_on_nick = config.get("trigger_on_nick", True)
    # Detect "forget me" command before trigger check
    forget_pattern = re.compile(
        rf"^{re.escape(bot_nick)}\s*[:,]\s*forget\s+me\s*$", re.IGNORECASE
    )
    if forget_pattern.match(text.strip()):
        mem.delete_user_history(channel, nick)
        logger.info(f"[MEMORY] Forgot history for {nick} in {channel}")
        return f"{nick}: Done, I've cleared your history."
    # Determine if triggered
    stripped = None
    if trigger_on_nick:
        # Matches "botnick: message" or "botnick, message" (case-insensitive).
        nick_pattern = re.compile(
            rf"^{re.escape(bot_nick)}\s*[:,]\s*", re.IGNORECASE
        )
        m = nick_pattern.match(text)
        if m:
            stripped = text[m.end():].strip()
    if stripped is None and trigger_prefix:
        if text.startswith(trigger_prefix):
            stripped = text[len(trigger_prefix):].strip()
    # Add to context buffer regardless
    ctx.append(f"<{nick}> {text}")
    if stripped is None:
        # Not addressed to the bot — kept as channel context only.
        return None
    # Build and send to LLM
    history = []
    if config.get("memory_enabled", True):
        limit = config.get("memory_history_limit", 8)
        history = mem.load_history(channel, nick, limit)
    prompt = llm_client.build_prompt(
        user_message=stripped,
        nick=nick,
        persistent_history=history,
        context_buffer=list(ctx)[:-1],  # exclude the current message already in buffer
    )
    system = config.get(
        "system_prompt",
        "You are a helpful IRC assistant for Active Blue. Keep responses concise and under 3 sentences when possible.",
    )
    logger.info(f"[LLM] Request from {nick} in {channel}: {stripped[:80]}")
    try:
        reply = llm_client.generate(prompt, system, config)
    except TimeoutError:
        return f"{nick}: [LLM timeout — try again]"
    except Exception as e:
        logger.error(f"[LLM] Generation error: {e}")
        return f"{nick}: [LLM error — check logs]"
    if config.get("memory_enabled", True):
        mem.save_exchange(channel, nick, stripped, reply)
    return f"{nick}: {reply}"