diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..27c1472 --- /dev/null +++ b/.env.example @@ -0,0 +1,23 @@ +# Environment overrides for the Automations toolbox. +# +# Copy this file to `.env` in the repo root and adjust values. Tools read +# these via `tools._common.config.get_config` / `get_bool` / `get_path`. +# All settings are OPTIONAL — each tool ships with a sensible default. +# +# IMPORTANT: `.env` must NEVER be committed. Add the line: +# .env +# to your `.gitignore` before creating the file. +# +# Precedence: real shell env vars > `.env` values > coded defaults. + +# -------- FFmpeg Studio -------- +# Default output folders for the Record / Capture / Convert tabs. +# AUTOMATIONS_FFMPEG_OUTPUT_DIR=~/Videos +# AUTOMATIONS_FFMPEG_CAPTURE_DIR=~/Pictures +# AUTOMATIONS_FFMPEG_CONVERT_DIR=~/Videos + +# -------- Network Stability Monitor -------- +# Auto-export schedule and destination. +# AUTOMATIONS_NSM_AUTO_EXPORT=1 # 1/true/yes or 0/false/no +# AUTOMATIONS_NSM_EXPORT_TIME=23:30 # HH:MM, 24-hour +# AUTOMATIONS_NSM_EXPORT_DIR=exports # absolute path or repo-relative diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..d506390 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,30 @@ +name: tests + +on: + push: + branches: [master] + pull_request: + +jobs: + test: + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: pip + + - name: Install runtime + test dependencies + # requirements.txt is intentionally minimal today (Plan B-B4 will pin + # the rest). Until then, list the deps the test suite needs to import + # tools/* without ImportError. 
+ run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pytest customtkinter psutil pillow scapy requests pywin32-ctypes + + - name: Run pytest + run: pytest -v diff --git a/.gitignore b/.gitignore index dbb9872..e35cf66 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,8 @@ venv/ __pycache__/ *.pyc *.pyo -portable/build/ +portable/build/* +!portable/build/*.spec portable/dist/ portable/*.spec exports/ @@ -13,3 +14,4 @@ exports/ *.log *.txt tools/OpenHardwareMonitor/ +security/poc/ diff --git a/HANDOFF.md b/HANDOFF.md index 7ff346b..5213265 100644 --- a/HANDOFF.md +++ b/HANDOFF.md @@ -134,14 +134,17 @@ python -m PyInstaller --clean --noconfirm ToolName.spec --distpath ..\dist # Key settings: # console=False (no console window) # uac_admin=True (for tools that need admin: Security Audit, Account Activity) -# hiddenimports=['customtkinter', 'psutil', ...] +# pathex=[os.path.abspath(os.path.join(SPECPATH, '..', '..'))] +# -> repo root, so PyInstaller can find the canonical tools/ package +# hiddenimports=['customtkinter', 'psutil', 'tools.', +# 'tools._common.threadsafe', ...] # collect_all('customtkinter') for CTk theme files ``` -After building, also copy the updated tool source to `portable/`: -```bash -copy tools\my_tool.py portable\my_tool.py -``` +Launchers import from the canonical `tools/` package (e.g. +`from tools.account_activity_monitor import App`) — **do not copy +sources into `portable/`**. The `tests/test_no_portable_source_drift.py` +guard fails CI if duplicate sources reappear there. 
--- @@ -199,7 +202,7 @@ copy tools\my_tool.py portable\my_tool.py | How tools are loaded | `Main.py` lines 266-322 | | Tool module template | Any tool's `run_tool()` function | | Dark theme constants | Top of any tool file | -| Network scanner | `NETWORK STABILITY MONITOR.py` → `scan_wifi_networks()` | +| Network scanner | `network_stability_monitor.py` → `scan_wifi_networks()` | | Event log querying | `account_activity_monitor.py` → `ActivityMonitorEngine._query_log()` | | GPU detection | `ffmpeg_studio.py` → `_test_encoder()`, `_probe_hardware()` | | Process management | `system_health_monitor.py` → `SystemHealthEngine.get_processes()` | diff --git a/SPEC.md b/SPEC.md index 0bd26a5..18b5cd7 100644 --- a/SPEC.md +++ b/SPEC.md @@ -49,7 +49,7 @@ Every tool in `tools/` must expose: | Tool | File | Lines | Purpose | |------|------|-------|---------| -| Network Stability Monitor Pro | `NETWORK STABILITY MONITOR.py` | 3,080 | Live network health monitoring with latency tracking, incident timeline, Wi-Fi analyzer, diagnostics | +| Network Stability Monitor Pro | `network_stability_monitor.py` | 3,080 | Live network health monitoring with latency tracking, incident timeline, Wi-Fi analyzer, diagnostics | | Network Intrusion Detector Pro | `network_intrusion_detector_pro.py` | 3,063 | LAN device discovery, connection classification (5 levels), threat detection (Flipper Zero, Tor, audio spying), firewall blocking | | Security Audit | `security_audit.py` | 1,836 | One-shot system security scan: startup items, processes, ports, filesystem, DNS, accounts, certificates, event logs | | Account Activity Monitor | `account_activity_monitor.py` | 2,362 | Windows Event Log monitor: account changes, logon activity, device events, system changes, spy check (camera/mic access, remote tools) | @@ -144,7 +144,15 @@ Every tool in `tools/` must expose: - `Security_Audit.exe` (~14 MB, UAC admin elevation) - `Account_Activity_Monitor.exe` (~14 MB, UAC admin elevation) -Each has: PyInstaller 
spec file, portable Python launcher, batch file wrapper. +Each has: PyInstaller spec file (`portable/build/*.spec`), portable +Python launcher (`portable/*_Portable.py`), batch file wrapper +(`portable/run_*.bat`). + +Launchers import tool code directly from the canonical `tools/` package +(`from tools.account_activity_monitor import App` etc.). Specs set +`pathex=[repo-root]` so PyInstaller resolves the package from source. +No duplicate `.py` sources live under `portable/` — +`tests/test_no_portable_source_drift.py` guards against drift. --- diff --git a/plans/2026-04-20-claude-usage-monitor-ux-overhaul.md b/plans/2026-04-20-claude-usage-monitor-ux-overhaul.md new file mode 100644 index 0000000..745bda1 --- /dev/null +++ b/plans/2026-04-20-claude-usage-monitor-ux-overhaul.md @@ -0,0 +1,209 @@ +# Plan — Claude Usage Monitor UX overhaul (single pass) + +Derived from `knowledge/research/2026-04-20-claude-usage-monitor-ux-patterns.md` + brainstorm on 2026-04-20. Bundles all 8 improvements into one branch. + +## Goal + +Close the four reported pain points (scroll loss on refresh, non-scrollable Session Detail, cramped Cost-by-Project panel, missing rotate notifications) and ship four supporting improvements (mtime-cache, collapsible cards, border tint, time-window filter) in a single pass through `tools/claude_usage_monitor.py`. + +## Success criteria + +1. Adding a new session/project during live refresh does not reset the Sessions tab scroll position. +2. Session Detail tab scrolls end-to-end; all cards + chart + tools + turn table reachable on a 900×550 window. +3. Dashboard Cost-by-Project shows 10+ projects without forcing window scroll; "Show all" toggle reveals remainder. +4. Any LIVE session crossing the AMBER or RED rotate threshold fires exactly one toast + in-app banner per threshold crossing (dedup across refreshes). +5. Unchanged JSONL files are not re-parsed on refresh (mtime-cache hit). +6. 
Time-window filter (`Today` / `Week` / `Month` / `All`) updates both Dashboard totals and Sessions list. +7. Existing 22 pytest tests still pass; new tests added for mtime-cache, time-window bucketing, and notification dedup. +8. Single-file structure preserved (per project convention). + +## Hard blockers (decide before starting) + +- **B1 — winotify dep approval.** Zero runtime deps, pure-Python, maintained. If rejected: in-app banner only, skip OS toast. +- **B2 — ORDER_FILE schema extension.** Add `collapsed: [project_name, ...]` and `notif_state: {session_id: last_level}` keys, or use two new sibling JSON files. Recommend single file for atomicity. + +## Scope notes + +- One branch: `ui/claude-usage-monitor-overhaul`. +- No changes to pricing, rotate-score formula, or cold-turn detection — behavior preserved. +- Dark palette unchanged (`#1e1e1e`, `#2b2b2b`, `#3a7ebf`, `#9e6a3a`, `#bf6a3a`, `#ffd479`). +- Keep helpers on top / GUI class below; no multi-file split. + +## Tasks + +### T1 — mtime-cache on JSONL parse [Effort: S, Risk: LOW] + +**Problem:** `load_all_sessions()` re-parses every `*.jsonl` file every 30s. On machines with 50+ projects this is wasted I/O. + +**Files:** `tools/claude_usage_monitor.py`, `tests/test_claude_usage_monitor.py`. + +**Approach:** +1. Module-scope cache: `_SESSION_CACHE: dict[str, tuple[float, dict]]` mapping filepath → (mtime, parsed_session). +2. In `load_all_sessions`, `stat` each file; reuse cached dict if `st_mtime` matches. +3. Evict entries for files that no longer exist. +4. Test: same file parsed twice returns the same object; modifying file invalidates cache. + +### T2 — Scrollable Session Detail tab [Effort: S, Risk: LOW, ║ parallel with T1/T3/T4] + +**Problem:** Cards, chart, tools panel, and turn table overflow on small windows; no scrollbar. + +**Approach:** +1. Wrap the pack-layout body of `_build_detail_tab` in `ctk.CTkScrollableFrame`. +2. 
Bind mousewheel passthrough so the inner `ttk.Treeview` (turn table) doesn't swallow scroll events when cursor outside it. +3. Verify minsize 900×550 renders full detail without clipping. + +### T3 — Cost-by-Project 2-col grid + Show all toggle [Effort: S, Risk: LOW, ║] + +**Problem:** Dashboard Cost-by-Project list is a single tall column; many projects force window scroll. + +**Approach:** +1. Replace the `pack(fill="x")` rows with a 2-column grid inside `_project_breakdown_frame`. +2. Default: top 10 projects; add "Show all (N)" / "Show top 10" toggle button on panel header. +3. Preserve existing bar widths + colors. + +### T4 — Time-window filter control [Effort: S, Risk: LOW, ║] + +**Problem:** All-time totals drown out today/this-week signal. + +**Approach:** +1. Add `ctk.CTkSegmentedButton` to top bar: `Today | Week | Month | All` (default: All). +2. Store selection in `self._window`; expose `_session_in_window(s) -> bool` helper that compares `last_timestamp` to cutoff. +3. No rendering change yet — landed fully in T9. + +### T5 — Project-card widget map extraction [Effort: M, Risk: MED] + +**Problem:** Current `_build_project_card` creates and returns widgets implicitly; no way to update them later without destroy+rebuild. + +**Approach:** +1. Refactor `_build_project_card` to return a dict of widget handles: `{card, header_label, meta_label, live_label, tree, collapse_btn}`. +2. Store in `self._project_cards[proj]` (already exists; expand shape). +3. Add `_update_project_card(proj, items)` that configures existing widgets in place (no destroy). +4. Tree rows: save iids on insert; `tree.delete(*get_children())` then re-insert is fine — scroll position of the outer CTkScrollableFrame is what matters, not per-Treeview. + +### T6 — Reconcile-by-key refactor of `_render_sessions` [Effort: M, Risk: HIGH] + +**Problem:** `_render_sessions` destroys all children every 30s → scroll reset + flicker. + +**Approach:** +1. 
Compute new ordered project list (saved order + remaining by cost). +2. Diff vs `self._project_cards.keys()`: + - `removed = old - new` → destroy those cards. + - `added = new - old` → build via `_build_project_card`. + - `kept = old & new` → call `_update_project_card`. +3. Re-pack in new order via `card.pack_forget()` then `card.pack(fill="x", pady=6, padx=4)` in loop order. +4. Persist the saved order as before. +5. Belt-and-braces: save `self._sess_scroll._parent_canvas.yview()` at top, restore after `update_idletasks()` (T7 covers this as its own concern). + +**Risk mitigation:** if reconcile misbehaves, fall back to current destroy-rebuild behind a `_USE_RECONCILE = True` module flag for quick rollback. + +### T7 — Scroll preservation safety net [Effort: S, Risk: LOW] + +**Problem:** Belt-and-braces for T6 in case reconcile ever degrades. + +**Approach:** +1. Wrapper on `_render_sessions`: save `self._sess_scroll._parent_canvas.yview()` pre-reconcile, restore post-`update_idletasks()`. +2. Same pattern for any future scroll-bearing rebuild. +3. Pin `customtkinter` version in `requirements.txt` since `_parent_canvas` is internal. + +### T8 — Collapsible project cards [Effort: M, Risk: MED] + +**Problem:** Users with many projects still can't see overview without scrolling. + +**Approach:** +1. Add `▼/▶` toggle button to card header (left of the project name). +2. Clicking toggles `tree.pack_forget()` / `tree.pack(...)`. +3. Persist `collapsed: [proj_name, ...]` in ORDER_FILE (pending B2). +4. On load, restore collapsed state before first render. +5. `Ctrl+click` the toggle = collapse/expand all (small UX bonus). + +### T9 — Plumb window filter through renderers [Effort: M, Risk: MED] + +**Problem:** T4 adds the control; now wire it. + +**Approach:** +1. Dashboard: filter `self._sessions` by `_session_in_window` before computing totals, peak hours, model/tool/project breakdowns. +2. Sessions: same filter before bucketing into projects. +3. 
"Plan Cost" card: scale to months-in-window (e.g. Week = 0.25 month) for accurate ratio. +4. Status label shows window context: `"42 sessions in Week | Last refresh: 14:32"`. + +### T10 — Notification dedup state [Effort: S, Risk: LOW] + +**Problem:** Without dedup, every 30s refresh would re-fire the same alert. + +**Approach:** +1. State structure: `{session_id: {"level": "amber"|"red", "fired_at": iso_ts}}`. +2. Persisted in ORDER_FILE under `notif_state` (pending B2). +3. Helper `_should_notify(session_id, new_level) -> bool`: fires only when `new_level` is a *higher* tier than last recorded, or when no record exists. +4. Evict entries whose sessions are no longer LIVE. +5. Test: amber → amber no fire; amber → red fires once; red → amber no fire (downgrade silent). + +### T11 — winotify integration with in-app banner fallback [Effort: M, Risk: MED] + +**Problem:** Need OS toast *and* an in-app signal the user can click to jump to the session. + +**Approach:** +1. `try: import winotify` at top; set `_HAS_TOAST` flag. +2. If available, `winotify.Notification(app_id="Claude Usage Monitor", title=..., msg=..., launch=deep_link)`. +3. In-app banner: a `ctk.CTkFrame` slot in the top bar, initially `pack_forget`'d. Shown when any unsatisfied notification exists. Single "Jump to session" button calls `_show_session_detail(s)`. +4. Banner color: amber `#e09a1a`, red `#cc3333` to match pill palette. +5. Banner persists until user clicks "Jump" or session drops below threshold. +6. If `winotify` rejected (B1), steps 2 is skipped — banner alone covers the need. + +### T12 — Rotate-threshold scan in `_on_loaded` [Effort: M, Risk: MED] + +**Problem:** Need to detect crossings after every refresh. + +**Approach:** +1. After `_on_loaded` finishes populating `self._sessions` + `self._active_ids`: +2. For each LIVE session with ≥5 turns, compute `_rotate_subscores`. +3. Map total score → level (`red` ≥60, `amber` ≥30, else `none`). +4. 
Consult `_should_notify`; if fire: toast + banner update. +5. Record to dedup state file. + +### T13 — Ambient rotation border on project cards [Effort: S, Risk: LOW] + +**Problem:** Users want at-a-glance signal of which projects have a burning session. + +**Approach:** +1. In `_update_project_card` (T5), compute max rotate score across its LIVE sessions. +2. Set `card.configure(border_color=...)`: + - red ≥60 → `#cc3333` + - amber ≥30 → `#e09a1a` + - else → `#333` (default). +3. Border is already 1px; no width change. + +### T14 — Tests [Effort: S, Risk: LOW] + +**Files:** `tests/test_claude_usage_monitor.py`. + +**Approach:** +- mtime-cache: same file → cached; mtime bump → re-parse. +- Window filter: `_session_in_window` bucketing at day/week/month boundaries (build fixture with fixed `now`). +- Dedup state: amber→amber no fire; amber→red fires; red→amber no fire; session removed → state evicted. +- All existing 22 tests pass unchanged. + +## Sequencing + +Parallel lane (land first, any order): **T1, T2, T3, T4**. +Then refactor lane (sequential): **T5 → T6 → T7 → T8 → T13**. +Then feature lane: **T9** (needs T4). +Then notifications lane (sequential): **T10 → T11 → T12**. +Finally: **T14** (can incrementally grow alongside each task). + +## Verification + +- Manual: open monitor, trigger a live claude session in another cwd, wait 30s — Sessions tab does not scroll-jump; banner appears when score goes amber. +- Manual: resize window to 900×550 — Session Detail scrolls cleanly. +- Automated: `pytest tests/test_claude_usage_monitor.py -v` all green (22 existing + ~6 new). + +## Rollback plan + +Each task is a separate commit. If T6 (reconcile refactor) regresses, flip `_USE_RECONCILE = False` and revert only that commit. T11 (winotify) can be toggled via import-guard. + +## Out of scope + +- Rotate-score formula changes. +- New metrics beyond time-window filter. +- CSV export (defer to a follow-up plan). +- System-tray icon (too heavy — would need pystray). 
+- Splitting the single-file tool (project convention to keep tools as single files). diff --git a/portable/Account_Activity_Monitor_Portable.py b/portable/Account_Activity_Monitor_Portable.py index cef7b3a..aee50b0 100644 --- a/portable/Account_Activity_Monitor_Portable.py +++ b/portable/Account_Activity_Monitor_Portable.py @@ -10,8 +10,9 @@ import os _HERE = os.path.dirname(os.path.abspath(__file__)) -if _HERE not in sys.path: - sys.path.insert(0, _HERE) +_REPO_ROOT = os.path.dirname(_HERE) +if _REPO_ROOT not in sys.path: + sys.path.insert(0, _REPO_ROOT) _missing = [] try: @@ -37,7 +38,7 @@ sys.exit(1) try: - from account_activity_monitor import App + from tools.account_activity_monitor import App except ImportError as e: import tkinter as tk from tkinter import messagebox @@ -45,8 +46,8 @@ _root.withdraw() messagebox.showerror( "Import Error", - f"Could not load account_activity_monitor.py\n\n" - f"Make sure it is in the same folder as this file.\n\nError: {e}" + f"Could not load tools.account_activity_monitor\n\n" + f"Make sure the repo's tools/ directory is present at {_REPO_ROOT}\n\nError: {e}" ) sys.exit(1) diff --git a/portable/Network_Intrusion_Detector_Portable.py b/portable/Network_Intrusion_Detector_Portable.py index f470126..57731e0 100644 --- a/portable/Network_Intrusion_Detector_Portable.py +++ b/portable/Network_Intrusion_Detector_Portable.py @@ -11,10 +11,11 @@ import sys import os -# Make sure the tool module can be found when this file is run from any location +# Make sure the tools/ package can be found when this file is run from any location _HERE = os.path.dirname(os.path.abspath(__file__)) -if _HERE not in sys.path: - sys.path.insert(0, _HERE) +_REPO_ROOT = os.path.dirname(_HERE) +if _REPO_ROOT not in sys.path: + sys.path.insert(0, _REPO_ROOT) # Dependency check with friendly error messages _missing = [] @@ -44,9 +45,9 @@ ) sys.exit(1) -# Import the tool's App class from the original module +# Import the tool's App class from the canonical 
tools/ package try: - from network_intrusion_detector_pro import App + from tools.network_intrusion_detector_pro import App except ImportError as e: import tkinter as tk from tkinter import messagebox @@ -54,8 +55,8 @@ _root.withdraw() messagebox.showerror( "Import Error", - f"Could not load network_intrusion_detector_pro.py\n\n" - f"Make sure it is in the same folder as this file.\n\nError: {e}" + f"Could not load tools.network_intrusion_detector_pro\n\n" + f"Make sure the repo's tools/ directory is present at {_REPO_ROOT}\n\nError: {e}" ) sys.exit(1) diff --git a/portable/Network_Stability_Monitor_Portable.py b/portable/Network_Stability_Monitor_Portable.py index 1172206..91aa70e 100644 --- a/portable/Network_Stability_Monitor_Portable.py +++ b/portable/Network_Stability_Monitor_Portable.py @@ -10,10 +10,11 @@ import sys import os -# Make sure the tool module can be found when this file is run from any location +# Make sure the tools/ package can be found when this file is run from any location _HERE = os.path.dirname(os.path.abspath(__file__)) -if _HERE not in sys.path: - sys.path.insert(0, _HERE) +_REPO_ROOT = os.path.dirname(_HERE) +if _REPO_ROOT not in sys.path: + sys.path.insert(0, _REPO_ROOT) # Dependency check with friendly error messages _missing = [] @@ -43,9 +44,9 @@ ) sys.exit(1) -# Import the tool's App class from the renamed copy +# Import the tool's App class from the canonical tools/ package try: - from network_stability_monitor import App + from tools.network_stability_monitor import App except ImportError as e: import tkinter as tk from tkinter import messagebox @@ -53,8 +54,8 @@ _root.withdraw() messagebox.showerror( "Import Error", - f"Could not load network_stability_monitor.py\n\n" - f"Make sure it is in the same folder as this file.\n\nError: {e}" + f"Could not load tools.network_stability_monitor\n\n" + f"Make sure the repo's tools/ directory is present at {_REPO_ROOT}\n\nError: {e}" ) sys.exit(1) diff --git 
a/portable/Security_Audit_Portable.py b/portable/Security_Audit_Portable.py index efc7c12..6c4da62 100644 --- a/portable/Security_Audit_Portable.py +++ b/portable/Security_Audit_Portable.py @@ -10,10 +10,11 @@ import sys import os -# Make sure the tool module can be found when this file is run from any location +# Make sure the tools/ package can be found when this file is run from any location _HERE = os.path.dirname(os.path.abspath(__file__)) -if _HERE not in sys.path: - sys.path.insert(0, _HERE) +_REPO_ROOT = os.path.dirname(_HERE) +if _REPO_ROOT not in sys.path: + sys.path.insert(0, _REPO_ROOT) # Dependency check with friendly error messages _missing = [] @@ -39,9 +40,9 @@ ) sys.exit(1) -# Import the tool's App class from the renamed copy +# Import the tool's App class from the canonical tools/ package try: - from security_audit import App + from tools.security_audit import App except ImportError as e: import tkinter as tk from tkinter import messagebox @@ -49,8 +50,8 @@ _root.withdraw() messagebox.showerror( "Import Error", - f"Could not load security_audit.py\n\n" - f"Make sure it is in the same folder as this file.\n\nError: {e}" + f"Could not load tools.security_audit\n\n" + f"Make sure the repo's tools/ directory is present at {_REPO_ROOT}\n\nError: {e}" ) sys.exit(1) diff --git a/portable/account_activity_monitor.py b/portable/account_activity_monitor.py deleted file mode 100644 index e408ee5..0000000 --- a/portable/account_activity_monitor.py +++ /dev/null @@ -1,2362 +0,0 @@ -""" -Account Activity Monitor — Historical timeline and live monitoring of -Windows account changes, logon activity, device events, system changes, -security policy modifications, and software installations. 
-""" - -import os -import sys -import re -import json -import time -import queue -import threading -import subprocess -import xml.etree.ElementTree as ET -from dataclasses import dataclass, field -from datetime import datetime, timedelta -from typing import List, Dict, Tuple, Optional, Set - -import tkinter as tk -from tkinter import ttk, messagebox, filedialog - -try: - import customtkinter as ctk - HAS_CTK = True -except ImportError: - HAS_CTK = False - -try: - import winreg -except ImportError: - winreg = None - -try: - import psutil -except ImportError: - psutil = None - -# ───────────────────────────────────────────── -# Constants -# ───────────────────────────────────────────── - -TOOL_NAME = "Account Activity Monitor" -TOOL_DESCRIPTION = "Track account changes, logon activity, device events, and system modifications" - -_CNW = 0x08000000 # CREATE_NO_WINDOW - -# Colors -C_GREEN = "#00FF88" -C_YELLOW = "#FFD700" -C_RED = "#FF4444" -C_CYAN = "#00BFFF" -C_ORANGE = "#FFA500" -C_GRAY = "#888888" -C_WHITE = "#FFFFFF" -CARD_BG = "#1e1e1e" -TREE_BG = "#2b2b2b" - -# Severity colors -SEV_COLORS = { - "CRITICAL": C_RED, - "WARNING": C_ORANGE, - "INFO": C_WHITE, -} - -# ───────────────────────────────────────────── -# Event Definitions -# ───────────────────────────────────────────── - -@dataclass -class EventDef: - event_id: int - log: str - category: str - severity: str - title: str - description: str = "" - -# All tracked events organized by category -EVENT_DEFS: Dict[int, EventDef] = {} - -def _reg(eid, log, cat, sev, title, desc=""): - EVENT_DEFS[eid] = EventDef(eid, log, cat, sev, title, desc) - -# ── Account Management ── -_reg(4720, "Security", "Account", "CRITICAL", "User account created") -_reg(4726, "Security", "Account", "CRITICAL", "User account deleted") -_reg(4722, "Security", "Account", "WARNING", "User account enabled") -_reg(4725, "Security", "Account", "WARNING", "User account disabled") -_reg(4738, "Security", "Account", "WARNING", "User account 
changed") -_reg(4724, "Security", "Account", "WARNING", "Password reset attempted") -_reg(4723, "Security", "Account", "INFO", "Password change attempted") -_reg(4732, "Security", "Account", "WARNING", "Member added to security group") -_reg(4733, "Security", "Account", "WARNING", "Member removed from security group") -_reg(4781, "Security", "Account", "WARNING", "Account name changed") - -# ── Logon Activity ── -# Note: 4624 (successful logon) excluded — too noisy (hundreds/day from services) -# Note: 4634 (logoff) excluded — pairs with 4624, equally noisy -_reg(4625, "Security", "Logon", "WARNING", "Failed logon attempt") -_reg(4648, "Security", "Logon", "WARNING", "Logon using explicit credentials") -_reg(4800, "Security", "Logon", "INFO", "Workstation locked") -_reg(4801, "Security", "Logon", "INFO", "Workstation unlocked") -_reg(1149, "Microsoft-Windows-TerminalServices-RemoteConnectionManager/Operational", - "Logon", "WARNING", "RDP connection established") - -# ── Device Changes ── -_reg(20001, "Microsoft-Windows-UserPnp/DeviceInstall", "Device", "WARNING", - "Driver installed") -_reg(20003, "Microsoft-Windows-UserPnp/DeviceInstall", "Device", "INFO", - "Driver service added") -_reg(6416, "Security", "Device", "WARNING", "New external device recognized") -_reg(400, "Microsoft-Windows-Kernel-PnP/Device Configuration", - "Device", "INFO", "Device connected (PnP)") -_reg(410, "Microsoft-Windows-Kernel-PnP/Device Configuration", - "Device", "INFO", "Device disconnected (PnP)") - -# ── System Changes ── -# Note: 6013 (uptime) excluded — fires every 12h, no security value -# Note: 1 (time changed) excluded — NTP sync fires constantly, not suspicious -_reg(6005, "System", "System", "INFO", "System boot") -_reg(6006, "System", "System", "INFO", "System shutdown") -_reg(6008, "System", "System", "WARNING", "Unexpected shutdown (crash/power loss)") -_reg(19, "System", "System", "INFO", "Windows Update installed") - -# ── Security Policy ── -_reg(4719, "Security", 
"Security", "CRITICAL", "System audit policy changed") -_reg(1102, "Security", "Security", "CRITICAL", "Audit log cleared") -_reg(4946, "Security", "Security", "WARNING", "Firewall rule added") -_reg(4947, "Security", "Security", "WARNING", "Firewall rule modified") -_reg(4948, "Security", "Security", "WARNING", "Firewall rule deleted") -_reg(5001, "Microsoft-Windows-Windows Defender/Operational", - "Security", "CRITICAL", "Windows Defender real-time protection disabled") -_reg(5010, "Microsoft-Windows-Windows Defender/Operational", - "Security", "WARNING", "Windows Defender scan disabled") -# Note: 5007 (Defender config changed) excluded — fires 50+ times/day from signature updates - -# ── Software ── -# Note: 7040 (service start type changed) excluded — BITS service alone generates 20+/day -# Note: 1033/1034 (MSI completed) excluded — duplicates 11707/11724 -_reg(7045, "System", "Software", "WARNING", "Service installed") -_reg(11707, "Application", "Software", "INFO", "Application installed") -_reg(11724, "Application", "Software", "WARNING", "Application removed") - -# Group by log source for efficient querying -EVENTS_BY_LOG: Dict[str, List[int]] = {} -for eid, edef in EVENT_DEFS.items(): - EVENTS_BY_LOG.setdefault(edef.log, []).append(eid) - -ALL_CATEGORIES = ["Account", "Logon", "Device", "System", "Security", "Software"] - -CATEGORY_ICONS = { - "Account": "👤", - "Logon": "🔑", - "Device": "🔌", - "System": "⚙️", - "Security": "🛡️", - "Software": "📦", -} - -# ───────────────────────────────────────────── -# Utilities -# ───────────────────────────────────────────── - -def safe_run(cmd: List[str], timeout: int = 15) -> Tuple[int, str, str]: - try: - cp = subprocess.run(cmd, capture_output=True, text=True, errors="replace", - timeout=timeout, shell=False, creationflags=_CNW) - return cp.returncode, cp.stdout, cp.stderr - except Exception as e: - return 1, "", str(e) - -def now_ts() -> str: - return datetime.now().strftime("%Y-%m-%d %H:%M:%S") - -def is_admin() 
-> bool: - try: - import ctypes - return ctypes.windll.shell32.IsUserAnAdmin() != 0 - except Exception: - return False - -def fmt_time(dt_str: str) -> str: - """Parse ISO timestamp from event XML and return readable format.""" - try: - # Handle both formats: 2026-03-30T12:00:00.000Z and 2026-03-30T12:00:00.0000000Z - dt_str = dt_str.rstrip('Z') - if '.' in dt_str: - # Truncate fractional seconds to 6 digits max - parts = dt_str.split('.') - frac = parts[1][:6] - dt_str = f"{parts[0]}.{frac}" - dt = datetime.strptime(dt_str, "%Y-%m-%dT%H:%M:%S.%f") - else: - dt = datetime.strptime(dt_str, "%Y-%m-%dT%H:%M:%S") - return dt.strftime("%Y-%m-%d %H:%M:%S") - except Exception: - return dt_str[:19] if len(dt_str) >= 19 else dt_str - - -# ───────────────────────────────────────────── -# Parsed Event -# ───────────────────────────────────────────── - -@dataclass -class ParsedEvent: - timestamp: str # formatted time string - timestamp_raw: str # original ISO for sorting - event_id: int - category: str - severity: str - title: str - details: str # extracted meaningful info - log_source: str - computer: str = "" - user: str = "" - raw_xml: str = "" - parsed_data: Dict = field(default_factory=dict) # all extracted key-value pairs - - def sort_key(self): - return self.timestamp_raw - - -# ───────────────────────────────────────────── -# Engine -# ───────────────────────────────────────────── - -class ActivityMonitorEngine: - def __init__(self): - self._admin = is_admin() - self._last_seen: Dict[str, str] = {} # log -> last timestamp seen - - def query_events(self, hours: int = 24, categories: Optional[Set[str]] = None, - callback=None) -> List[ParsedEvent]: - """Query historical events from the last N hours.""" - if categories is None: - categories = set(ALL_CATEGORIES) - - # Calculate time filter - since = datetime.utcnow() - timedelta(hours=hours) - time_filter = since.strftime("%Y-%m-%dT%H:%M:%S.000Z") - - all_events = [] - total_logs = len(EVENTS_BY_LOG) - - for idx, (log, 
event_ids) in enumerate(EVENTS_BY_LOG.items()): - if callback: - callback(f"Querying {log}...", idx / total_logs) - - # Filter event IDs to only requested categories - filtered_ids = [eid for eid in event_ids - if EVENT_DEFS[eid].category in categories] - if not filtered_ids: - continue - - # Skip Security log if not admin - if log == "Security" and not self._admin: - continue - - events = self._query_log(log, filtered_ids, time_filter) - all_events.extend(events) - - # Sort by timestamp descending (newest first) - all_events.sort(key=lambda e: e.timestamp_raw, reverse=True) - - # Aggregate repeated events (same event_id within 5 minutes) - all_events = self._aggregate_events(all_events) - - if callback: - callback(f"Done — {len(all_events)} events found", 1.0) - - return all_events - - def query_new_events(self, categories: Optional[Set[str]] = None) -> List[ParsedEvent]: - """Query events newer than last seen. For live monitoring.""" - if categories is None: - categories = set(ALL_CATEGORIES) - - all_events = [] - - for log, event_ids in EVENTS_BY_LOG.items(): - filtered_ids = [eid for eid in event_ids - if EVENT_DEFS[eid].category in categories] - if not filtered_ids: - continue - - if log == "Security" and not self._admin: - continue - - # Use last seen timestamp or last 60 seconds - last = self._last_seen.get(log) - if not last: - since = datetime.utcnow() - timedelta(seconds=60) - last = since.strftime("%Y-%m-%dT%H:%M:%S.000Z") - - events = self._query_log(log, filtered_ids, last) - - # Update last seen - if events: - newest = max(e.timestamp_raw for e in events) - # Add 1ms to avoid re-fetching the same event - self._last_seen[log] = newest - elif log not in self._last_seen: - self._last_seen[log] = datetime.utcnow().strftime( - "%Y-%m-%dT%H:%M:%S.000Z") - - all_events.extend(events) - - all_events.sort(key=lambda e: e.timestamp_raw, reverse=True) - return all_events - - def _aggregate_events(self, events: List[ParsedEvent]) -> List[ParsedEvent]: - 
"""Collapse repeated events of the same type within 5 minutes into one entry.""" - if not events: - return events - - aggregated = [] - i = 0 - while i < len(events): - e = events[i] - # Count consecutive events of the same type - count = 1 - j = i + 1 - while j < len(events): - other = events[j] - if other.event_id != e.event_id: - break - # Check if within 5 minutes - try: - t1 = e.timestamp_raw.rstrip('Z') - t2 = other.timestamp_raw.rstrip('Z') - if '.' in t1: - t1 = t1.split('.')[0] - if '.' in t2: - t2 = t2.split('.')[0] - dt1 = datetime.strptime(t1, "%Y-%m-%dT%H:%M:%S") - dt2 = datetime.strptime(t2, "%Y-%m-%dT%H:%M:%S") - if abs((dt1 - dt2).total_seconds()) > 300: - break - except Exception: - break - count += 1 - j += 1 - - if count > 1: - # Create aggregated event - agg = ParsedEvent( - timestamp=e.timestamp, - timestamp_raw=e.timestamp_raw, - event_id=e.event_id, - category=e.category, - severity=e.severity, - title=f"{e.title} ({count}x in 5min)", - details=e.details, - log_source=e.log_source, - computer=e.computer, - user=e.user, - raw_xml=e.raw_xml, - ) - aggregated.append(agg) - else: - aggregated.append(e) - i = j - - return aggregated - - def _query_log(self, log: str, event_ids: List[int], - time_filter: str) -> List[ParsedEvent]: - """Query a specific event log for specific event IDs since a timestamp.""" - events = [] - - # Build XPath query for multiple event IDs - id_conditions = " or ".join(f"EventID={eid}" for eid in event_ids) - xpath = f"*[System[({id_conditions}) and TimeCreated[@SystemTime>='{time_filter}']]]" - - try: - rc, out, err = safe_run([ - "wevtutil", "qe", log, - f"/q:{xpath}", - "/f:xml", - "/rd:true", # reverse direction (newest first) - "/c:500", # cap at 500 per log - ], timeout=20) - - if rc != 0 or not out.strip(): - return events - - # Parse XML events — wevtutil outputs multiple elements - # Wrap in root to make valid XML - xml_str = f"{out}" - try: - root = ET.fromstring(xml_str) - except ET.ParseError: - # Try parsing 
individual events - return self._parse_events_fallback(out, event_ids) - - ns = {"e": "http://schemas.microsoft.com/win/2004/08/events/event"} - - for event_elem in root.findall(".//e:Event", ns): - parsed = self._parse_event_xml(event_elem, ns) - if parsed: - events.append(parsed) - - except Exception: - pass - - return events - - def _parse_event_xml(self, event_elem, ns) -> Optional[ParsedEvent]: - """Parse a single Event XML element into a ParsedEvent.""" - try: - sys_elem = event_elem.find("e:System", ns) - if sys_elem is None: - return None - - eid_elem = sys_elem.find("e:EventID", ns) - eid = int(eid_elem.text) if eid_elem is not None and eid_elem.text else 0 - - if eid not in EVENT_DEFS: - return None - - edef = EVENT_DEFS[eid] - - # Timestamp - tc_elem = sys_elem.find("e:TimeCreated", ns) - ts_raw = tc_elem.get("SystemTime", "") if tc_elem is not None else "" - - # Computer - comp_elem = sys_elem.find("e:Computer", ns) - computer = comp_elem.text if comp_elem is not None and comp_elem.text else "" - - # Extract event data - details, parsed_data = self._extract_details(event_elem, ns, eid) - user = self._extract_user(event_elem, ns) - - # Raw XML for detail popup - try: - raw = ET.tostring(event_elem, encoding="unicode") - except Exception: - raw = "" - - # Filter out known Windows-internal noise - if self._is_internal_noise(eid, parsed_data): - return None - - return ParsedEvent( - timestamp=fmt_time(ts_raw), - timestamp_raw=ts_raw, - event_id=eid, - category=edef.category, - severity=edef.severity, - title=edef.title, - details=details, - log_source=edef.log, - computer=computer, - user=user, - raw_xml=raw, - parsed_data=parsed_data, - ) - except Exception: - return None - - def _is_internal_noise(self, eid: int, data: Dict) -> bool: - """Filter out Windows-internal events that look scary but are normal.""" - target_user = data.get("TargetUserName", "").lower() - target_domain = data.get("TargetDomainName", "").lower() - subject_user = 
data.get("SubjectUserName", "").lower() - process = data.get("ProcessName", "").lower() - - # 4648: Explicit credential logons from Windows internals - if eid == 4648: - # DWM (Desktop Window Manager) sessions — every boot - if target_domain == "window manager" or target_user.startswith("dwm-"): - return True - # UMFD (User Mode Font Driver) — every boot - if target_user.startswith("umfd-"): - return True - # SYSTEM account logging into localhost services - if subject_user.endswith("$") and target_user == subject_user: - return True - - # 4624/4625: Filter service/system logons (type 5 = service, type 0 = system) - if eid in (4624, 4625): - logon_type = data.get("LogonType", "") - if logon_type in ("0", "5"): # System and Service logons - return True - # SYSTEM, LOCAL SERVICE, NETWORK SERVICE accounts - if target_user in ("system", "local service", "network service"): - return True - # Anonymous logon (Windows internal) - if target_user == "anonymous logon": - return True - - # 4634: Logoff from service/system accounts - if eid == 4634: - if target_user in ("system", "local service", "network service", - "anonymous logon") or target_user.startswith("dwm-") \ - or target_user.startswith("umfd-"): - return True - - return False - - def _extract_details(self, event_elem, ns, eid: int) -> Tuple[str, Dict]: - """Extract meaningful details from event data fields.""" - data_elems = event_elem.findall(".//e:EventData/e:Data", ns) - if not data_elems: - # Try UserData - data_elems = event_elem.findall(".//{http://schemas.microsoft.com/win/2004/08/events/event}UserData//*") - - data = {} - for d in data_elems: - name = d.get("Name", "") - value = d.text or "" - if name and value.strip(): - data[name] = value.strip() - - # Build meaningful detail string based on event type - parts = [] - - if eid in (4720, 4726, 4722, 4725, 4738, 4781): - # Account events - target = data.get("TargetUserName", data.get("NewTargetUserName", "")) - actor = data.get("SubjectUserName", "") - if 
target: - parts.append(f"Account: {target}") - if actor and actor != "-": - parts.append(f"By: {actor}") - if eid == 4781: - old = data.get("OldTargetUserName", "") - new = data.get("NewTargetUserName", "") - if old and new: - parts.append(f"Renamed: {old} → {new}") - - elif eid in (4624, 4625, 4634, 4648): - # Logon events - user = data.get("TargetUserName", "") - domain = data.get("TargetDomainName", "") - logon_type = data.get("LogonType", "") - ip = data.get("IpAddress", "") - if user: - parts.append(f"User: {domain}\\{user}" if domain and domain != "-" else f"User: {user}") - if logon_type: - type_names = { - "2": "Interactive", "3": "Network", "4": "Batch", - "5": "Service", "7": "Unlock", "8": "NetworkCleartext", - "10": "RemoteInteractive", "11": "CachedInteractive", - } - parts.append(f"Type: {type_names.get(logon_type, logon_type)}") - if ip and ip not in ("-", "::1", "127.0.0.1"): - parts.append(f"IP: {ip}") - if eid == 4625: - reason = data.get("FailureReason", data.get("Status", "")) - if reason: - parts.append(f"Reason: {reason}") - - elif eid == 7045: - # Service installed - svc = data.get("ServiceName", "") - path = data.get("ImagePath", "") - if svc: - parts.append(f"Service: {svc}") - if path: - parts.append(f"Path: {path[:150]}") - - elif eid in (11707, 11724, 1033, 1034): - # Software install/remove - product = data.get("Product", data.get("ProductName", "")) - if product: - parts.append(f"Product: {product}") - - elif eid == 19: - # Windows Update - title = data.get("updateTitle", "") - if title: - parts.append(f"Update: {title}") - - elif eid in (4946, 4947, 4948): - # Firewall rules - rule = data.get("RuleName", "") - if rule: - parts.append(f"Rule: {rule}") - - elif eid == 1149: - # RDP - user = data.get("Param1", "") - domain = data.get("Param2", "") - ip = data.get("Param3", "") - if user: - parts.append(f"User: {domain}\\{user}" if domain else f"User: {user}") - if ip: - parts.append(f"From: {ip}") - - elif eid in (4732, 4733): - # Group 
membership - member = data.get("MemberName", data.get("MemberSid", "")) - group = data.get("TargetUserName", "") - if member: - parts.append(f"Member: {member}") - if group: - parts.append(f"Group: {group}") - - elif eid in (6416, 20001, 20003, 400, 410): - # Device events - desc = data.get("DeviceDescription", "") - class_name = data.get("ClassName", "") - device_id = data.get("DeviceId", data.get("DeviceInstanceId", "")) - if desc: - parts.append(f"Device: {desc}") - elif device_id: - # Extract friendly name from device ID - short_id = device_id.split("\\")[-1] if "\\" in device_id else device_id - parts.append(f"Device: {short_id}") - if class_name: - # Map class names to friendly descriptions - class_friendly = { - "AudioEndpoint": "Audio Device", - "Bluetooth": "Bluetooth Device", - "USB": "USB Device", - "HIDClass": "Input Device (keyboard/mouse)", - "DiskDrive": "Disk Drive", - "Net": "Network Adapter", - "Monitor": "Display/Monitor", - "Camera": "Camera/Webcam", - "Image": "Scanner/Imaging Device", - "Printer": "Printer", - "WPD": "Portable Device (phone/tablet)", - } - friendly = class_friendly.get(class_name, class_name) - parts.append(f"Type: {friendly}") - # Driver info for install events - driver = data.get("DriverName", data.get("DriverProvider", "")) - if driver: - parts.append(f"Driver: {driver}") - - elif eid in (5001, 5010): - # Defender disabled events - parts.append("Windows Defender protection was disabled!") - - elif eid == 6008: - # Unexpected shutdown - parts.append("System was not shut down cleanly (crash, power loss, or forced)") - - # Fallback: show first few data fields - if not parts and data: - for k, v in list(data.items())[:3]: - if v and v != "-": - parts.append(f"{k}: {v[:100]}") - - detail_str = " | ".join(parts) if parts else EVENT_DEFS.get(eid, EventDef(0, "", "", "", "")).description - return detail_str, data - - def _extract_user(self, event_elem, ns) -> str: - """Extract the user/subject from event.""" - data_elems = 
event_elem.findall(".//e:EventData/e:Data", ns) - for d in data_elems: - name = d.get("Name", "") - if name in ("TargetUserName", "SubjectUserName") and d.text: - return d.text.strip() - return "" - - def _parse_events_fallback(self, raw: str, event_ids: List[int]) -> List[ParsedEvent]: - """Fallback parser when XML is malformed.""" - events = [] - ns = {"e": "http://schemas.microsoft.com/win/2004/08/events/event"} - # Split by List[Dict]: - """Get all local user accounts with details.""" - accounts = [] - try: - rc, out, _ = safe_run([ - "powershell", "-NoProfile", "-Command", - "Get-LocalUser | Select-Object Name, Enabled, LastLogon, " - "PasswordLastSet, Description, SID, " - "@{N='Created';E={$_.PrincipalSource}} | ConvertTo-Json -Depth 2" - ], timeout=10) - if rc == 0 and out.strip(): - data = json.loads(out) - if isinstance(data, dict): - data = [data] - for u in data: - accounts.append({ - "name": u.get("Name", ""), - "enabled": u.get("Enabled", False), - "last_logon": u.get("LastLogon", ""), - "password_set": u.get("PasswordLastSet", ""), - "description": u.get("Description", ""), - "sid": str(u.get("SID", {}).get("Value", "")) if isinstance(u.get("SID"), dict) else str(u.get("SID", "")), - }) - except Exception: - pass - - # Enrich with net user details (creation date, groups) - for acc in accounts: - try: - rc, out, _ = safe_run(["net", "user", acc["name"]], timeout=5) - if rc == 0: - for line in out.splitlines(): - line = line.strip() - if line.startswith("Full Name"): - acc["full_name"] = line.split(None, 2)[-1].strip() if len(line.split(None, 2)) > 2 else "" - elif line.startswith("Account active"): - acc["active_str"] = line.split(None, 2)[-1].strip() - elif "Password last set" in line: - acc["password_last_set_str"] = line.split(None, 3)[-1].strip() if len(line.split(None, 3)) > 3 else "" - elif "Last logon" in line: - acc["last_logon_str"] = line.split(None, 2)[-1].strip() if len(line.split(None, 2)) > 2 else "" - elif "Local Group" in line: - 
groups = re.findall(r'\*(\S+)', line) - acc["groups"] = groups - elif "Account expires" in line: - acc["expires"] = line.split(None, 2)[-1].strip() if len(line.split(None, 2)) > 2 else "" - except Exception: - pass - - # SID analysis: SID-1001 = first user created during setup - sid = acc.get("sid", "") - rid = sid.split("-")[-1] if sid else "" - if rid.isdigit(): - rid_int = int(rid) - if rid_int == 500: - acc["sid_note"] = "Built-in Administrator" - elif rid_int == 501: - acc["sid_note"] = "Built-in Guest" - elif rid_int == 503: - acc["sid_note"] = "DefaultAccount (system)" - elif rid_int == 504: - acc["sid_note"] = "WDAGUtilityAccount (Defender)" - elif rid_int == 1001: - acc["sid_note"] = "First user created during Windows setup" - elif rid_int > 1001: - acc["sid_note"] = "Created after initial setup (added later)" - else: - acc["sid_note"] = "" - else: - acc["sid_note"] = "" - - return accounts - - def get_account_events_all_time(self) -> List[ParsedEvent]: - """Query ALL account-related events with no time limit. Requires admin.""" - if not self._admin: - return [] - account_ids = [e.event_id for e in EVENT_DEFS.values() if e.category == "Account"] - return self._query_log("Security", account_ids, "2000-01-01T00:00:00.000Z") - - # ── Spy Check ── - - def run_spy_check(self) -> List[Dict]: - """Run comprehensive spy/intrusion checks. Returns list of findings.""" - findings = [] - - # 1. Camera access history - findings.extend(self._check_capability_access("webcam", "Camera")) - - # 2. Microphone access history - findings.extend(self._check_capability_access("microphone", "Microphone")) - - # 3. Remote access settings - findings.extend(self._check_remote_access()) - - # 4. Suspicious user accounts - findings.extend(self._check_suspicious_accounts()) - - # 5. Recent user profile changes - findings.extend(self._check_profile_changes()) - - # 6. Event log tampering - findings.extend(self._check_log_tampering()) - - # 7. 
Suspicious scheduled tasks - findings.extend(self._check_suspicious_tasks()) - - # 8. Recent remote access tools - findings.extend(self._check_remote_tools()) - - return findings - - def _check_capability_access(self, capability: str, label: str) -> List[Dict]: - """Check Windows Privacy camera/mic access history.""" - findings = [] - if not winreg: - return findings - - base_path = rf"SOFTWARE\Microsoft\Windows\CurrentVersion\CapabilityAccessManager\ConsentStore\{capability}" - - for hive, hive_name in [(winreg.HKEY_CURRENT_USER, "User"), - (winreg.HKEY_LOCAL_MACHINE, "System")]: - try: - key = winreg.OpenKey(hive, base_path, 0, winreg.KEY_READ) - # Check global allow/deny - try: - val, _ = winreg.QueryValueEx(key, "Value") - if val == "Deny": - findings.append({ - "category": label, - "severity": "INFO", - "title": f"{label} access is BLOCKED globally", - "detail": f"No apps can access your {capability}. This is safe.", - "icon": "✓", - }) - except FileNotFoundError: - pass - - # Enumerate apps - i = 0 - while True: - try: - subkey_name = winreg.EnumKey(key, i) - subkey = winreg.OpenKey(key, subkey_name, 0, winreg.KEY_READ) - try: - start_raw, _ = winreg.QueryValueEx(subkey, "LastUsedTimeStart") - stop_raw, _ = winreg.QueryValueEx(subkey, "LastUsedTimeStop") - - # Convert Windows FILETIME to datetime - start_dt = self._filetime_to_datetime(start_raw) - stop_dt = self._filetime_to_datetime(stop_raw) - - # Clean up app name - app_name = subkey_name.replace("#", "\\") - # Extract readable name - if "_" in app_name and "." 
in app_name: - # UWP app — extract base name - parts = app_name.split("_") - app_name = parts[0] - - start_str = start_dt.strftime("%Y-%m-%d %H:%M:%S") if start_dt else "Unknown" - stop_str = stop_dt.strftime("%Y-%m-%d %H:%M:%S") if stop_dt else "Still active" - - # Determine if currently active - is_active = stop_raw == 0 or (stop_dt and start_dt and stop_dt < start_dt) - - severity = "WARNING" if is_active else "INFO" - status = "ACTIVE NOW" if is_active else "Last used" - - findings.append({ - "category": label, - "severity": severity, - "title": f"{app_name} accessed {capability}", - "detail": f"{status}: {start_str} — {stop_str}", - "icon": "🔴" if is_active else "📋", - }) - except FileNotFoundError: - pass - winreg.CloseKey(subkey) - i += 1 - except OSError: - break - winreg.CloseKey(key) - except (OSError, PermissionError): - pass - - if not any(f["category"] == label for f in findings): - findings.append({ - "category": label, - "severity": "INFO", - "title": f"No {capability} access history found", - "detail": "No apps have accessed this device recently.", - "icon": "✓", - }) - - return findings - - def _filetime_to_datetime(self, ft) -> Optional[datetime]: - """Convert Windows FILETIME (100ns intervals since 1601) to datetime.""" - if not ft or ft == 0: - return None - try: - # FILETIME epoch is Jan 1, 1601 - EPOCH_DIFF = 116444736000000000 # 100ns intervals between 1601 and 1970 - timestamp = (ft - EPOCH_DIFF) / 10000000 # Convert to seconds - return datetime.fromtimestamp(timestamp) - except (ValueError, OSError, OverflowError): - return None - - def _check_remote_access(self) -> List[Dict]: - """Check if remote access is enabled.""" - findings = [] - if not winreg: - return findings - - # RDP - try: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Control\Terminal Server", 0, winreg.KEY_READ) - deny, _ = winreg.QueryValueEx(key, "fDenyTSConnections") - winreg.CloseKey(key) - if deny == 0: - findings.append({ - "category": 
"Remote Access", - "severity": "CRITICAL", - "title": "Remote Desktop (RDP) is ENABLED", - "detail": "Someone can remotely control this PC if they have credentials.\n" - "Disable: Settings → System → Remote Desktop → Off", - "icon": "⚠", - }) - else: - findings.append({ - "category": "Remote Access", - "severity": "INFO", - "title": "Remote Desktop is disabled", - "detail": "RDP is off. No one can remote desktop into this PC.", - "icon": "✓", - }) - except Exception: - pass - - # Remote Assistance - try: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Control\Remote Assistance", 0, winreg.KEY_READ) - allow, _ = winreg.QueryValueEx(key, "fAllowToGetHelp") - winreg.CloseKey(key) - if allow: - findings.append({ - "category": "Remote Access", - "severity": "WARNING", - "title": "Remote Assistance is ENABLED", - "detail": "Someone could send a remote assistance invitation.\n" - "Disable: System Properties → Remote → uncheck 'Allow Remote Assistance'", - "icon": "⚠", - }) - except Exception: - pass - - # Check for remote access software running - if psutil: - remote_tools = { - "teamviewer": "TeamViewer", - "anydesk": "AnyDesk", - "rustdesk": "RustDesk", - "vnc": "VNC Server", - "ammyy": "Ammyy Admin", - "supremo": "Supremo", - "logmein": "LogMeIn", - "splashtop": "Splashtop", - "remotepc": "RemotePC", - } - for proc in psutil.process_iter(['name', 'exe']): - try: - pname = proc.info['name'].lower() - for key, label in remote_tools.items(): - if key in pname: - findings.append({ - "category": "Remote Access", - "severity": "WARNING", - "title": f"{label} is RUNNING", - "detail": f"Process: {proc.info['name']}\n" - f"Path: {proc.info.get('exe', 'Unknown')}", - "icon": "⚠", - }) - except (psutil.NoSuchProcess, psutil.AccessDenied): - continue - - return findings - - def _check_suspicious_accounts(self) -> List[Dict]: - """Check for suspicious user accounts.""" - findings = [] - try: - rc, out, _ = safe_run(["net", "user"], timeout=10) - if rc 
== 0: - lines = out.splitlines() - for line in lines: - if line.startswith("---") or line.startswith("User accounts") or \ - line.startswith("The command") or not line.strip(): - continue - for name in line.split(): - name = name.strip() - if not name: - continue - # Check each account - rc2, detail, _ = safe_run(["net", "user", name], timeout=5) - if rc2 != 0: - continue - is_active = "Yes" in [l.split()[-1] for l in detail.splitlines() - if "Account active" in l] - is_admin = "Administrators" in detail - - # Flag unknown active admin accounts - if is_active and is_admin and name.lower() not in ( - "administrator", "christophoros", "chrpa"): - findings.append({ - "category": "Accounts", - "severity": "CRITICAL", - "title": f"Unknown admin account: {name}", - "detail": "This active administrator account is not recognized.\n" - "Could be created by an attacker for persistent access.", - "icon": "⚠", - }) - except Exception: - pass - - if not findings: - findings.append({ - "category": "Accounts", - "severity": "INFO", - "title": "No suspicious accounts found", - "detail": "All active admin accounts are recognized.", - "icon": "✓", - }) - - return findings - - def _check_profile_changes(self) -> List[Dict]: - """Check for signs someone modified user profile/desktop.""" - findings = [] - if not winreg: - return findings - - # Wallpaper source check - try: - key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r"Control Panel\Desktop", 0, winreg.KEY_READ) - wp, _ = winreg.QueryValueEx(key, "Wallpaper") - winreg.CloseKey(key) - if wp: - findings.append({ - "category": "Profile", - "severity": "INFO", - "title": "Current wallpaper", - "detail": wp, - "icon": "🖼", - }) - except Exception: - pass - - # Check if screensaver has password protection - try: - key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r"Control Panel\Desktop", 0, winreg.KEY_READ) - try: - ss_secure, _ = winreg.QueryValueEx(key, "ScreenSaverIsSecure") - if ss_secure == "0": - findings.append({ - "category": 
"Profile", - "severity": "WARNING", - "title": "Screen saver NOT password-protected", - "detail": "When the screen saver activates, no password is required to unlock.\n" - "Anyone can access the PC when you step away.", - "icon": "⚠", - }) - except FileNotFoundError: - pass - winreg.CloseKey(key) - except Exception: - pass - - # Check auto-lock timeout - try: - key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r"Control Panel\Desktop", 0, winreg.KEY_READ) - try: - timeout, _ = winreg.QueryValueEx(key, "ScreenSaveTimeOut") - timeout_min = int(timeout) // 60 if timeout else 0 - if timeout_min > 15 or timeout_min == 0: - findings.append({ - "category": "Profile", - "severity": "WARNING", - "title": f"Screen auto-lock: {timeout_min} min" if timeout_min else "Screen auto-lock: DISABLED", - "detail": "Long timeout or disabled auto-lock means the PC stays unlocked\n" - "when you walk away. Recommended: 5 minutes or less.", - "icon": "⚠", - }) - except FileNotFoundError: - findings.append({ - "category": "Profile", - "severity": "WARNING", - "title": "Screen auto-lock: NOT CONFIGURED", - "detail": "No screen timeout is set. PC won't auto-lock when idle.", - "icon": "⚠", - }) - winreg.CloseKey(key) - except Exception: - pass - - return findings - - def _check_log_tampering(self) -> List[Dict]: - """Check if event logs show signs of tampering.""" - findings = [] - - # Check if Security log was recently cleared (Event 1102) - if self._admin: - events = self._query_log("Security", [1102], "2000-01-01T00:00:00.000Z") - if events: - # Extract who cleared it - last = events[0] - who = last.parsed_data.get("SubjectUserName", "Unknown") - sid = last.parsed_data.get("SubjectUserSid", "") - detail = f"Someone cleared the Windows Security audit log.\n" \ - f"Last cleared: {last.timestamp}\n" \ - f"Cleared by: {who} (SID: {sid})\n" - # Check if it was a known user - if who.endswith("$"): - detail += "This was done by the SYSTEM account (unusual — investigate)." 
- else: - detail += f"This was done by user '{who}'." - findings.append({ - "category": "Log Integrity", - "severity": "CRITICAL", - "title": f"Security log was CLEARED ({len(events)} times) — by {who}", - "detail": detail, - "icon": "⚠", - }) - else: - findings.append({ - "category": "Log Integrity", - "severity": "INFO", - "title": "Security log has NOT been cleared", - "detail": "No evidence of log tampering.", - "icon": "✓", - }) - - # Check log sizes (too small = events being lost) - for log_name in ["Security", "System", "Application"]: - try: - rc, out, _ = safe_run(["wevtutil", "gl", log_name], timeout=5) - if rc == 0: - for line in out.splitlines(): - if "maxSize" in line: - max_bytes = int(line.split(":")[-1].strip()) - max_mb = max_bytes / (1024 * 1024) - if max_mb <= 20: - findings.append({ - "category": "Log Integrity", - "severity": "WARNING", - "title": f"{log_name} log too small ({max_mb:.0f} MB)", - "detail": f"Default 20 MB only keeps ~3 days of history.\n" - "Click 'Fix Log Sizes' below to increase to 200 MB (~6-12 months).", - "icon": "⚠", - "fixable": "log_size", - "fix_target": log_name, - }) - except Exception: - pass - - return findings - - def _check_suspicious_tasks(self) -> List[Dict]: - """Check for suspicious scheduled tasks.""" - findings = [] - suspicious_paths = [r"\appdata\local\temp", r"\users\public", - r"\programdata", r"\downloads", "cmd.exe /c", - "powershell.exe -e", "powershell.exe -enc"] - try: - rc, out, _ = safe_run(["schtasks", "/query", "/fo", "CSV", "/v"], timeout=20) - if rc == 0 and out.strip(): - import csv, io - reader = csv.DictReader(io.StringIO(out)) - seen = set() - for row in reader: - task_name = row.get("TaskName", "").strip() - task_run = row.get("Task To Run", "").strip() - if not task_name or task_name in seen or task_name == "TaskName": - continue - if "\\Microsoft\\" in task_name: - continue - seen.add(task_name) - - for susp in suspicious_paths: - if susp.lower() in task_run.lower(): - findings.append({ 
- "category": "Scheduled Tasks", - "severity": "WARNING", - "title": f"Suspicious task: {task_name.split(chr(92))[-1]}", - "detail": f"Command: {task_run[:200]}\n" - "Running from a suspicious location.", - "icon": "⚠", - }) - break - except Exception: - pass - - return findings - - def _check_remote_tools(self) -> List[Dict]: - """Check for installed remote access tools (not just running).""" - findings = [] - remote_indicators = [ - (r"SOFTWARE\TeamViewer", "TeamViewer"), - (r"SOFTWARE\AnyDesk", "AnyDesk"), - (r"SOFTWARE\WOW6432Node\TeamViewer", "TeamViewer (32-bit)"), - ] - if winreg: - for reg_path, name in remote_indicators: - try: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_path, 0, winreg.KEY_READ) - winreg.CloseKey(key) - findings.append({ - "category": "Remote Access", - "severity": "WARNING", - "title": f"{name} is INSTALLED", - "detail": f"Found in registry: HKLM\\{reg_path}\n" - "If you don't use this, remove it — it allows remote control.", - "icon": "⚠", - }) - except FileNotFoundError: - pass - except Exception: - pass - - return findings - - def export_events(self, events: List[ParsedEvent], path: str): - """Export events to JSON.""" - data = { - "exported_at": now_ts(), - "total_events": len(events), - "events": [ - { - "timestamp": e.timestamp, - "event_id": e.event_id, - "category": e.category, - "severity": e.severity, - "title": e.title, - "details": e.details, - "user": e.user, - "computer": e.computer, - "log_source": e.log_source, - } - for e in events - ] - } - with open(path, "w", encoding="utf-8") as f: - json.dump(data, f, indent=2, ensure_ascii=False) - - -# ───────────────────────────────────────────── -# App (UI) -# ───────────────────────────────────────────── - -class App(ctk.CTkFrame if HAS_CTK else tk.Frame): - def __init__(self, parent): - super().__init__(parent, corner_radius=10) - self.parent = parent - parent.title(TOOL_NAME) - parent.geometry("1300x750") - parent.minsize(900, 550) - - self.running = True - 
self.engine = ActivityMonitorEngine() - - # Queues - self.work_q: queue.Queue = queue.Queue() - self.result_q: queue.Queue = queue.Queue() - - # State - self._hist_events: List[ParsedEvent] = [] - self._live_events: List[ParsedEvent] = [] - self._live_paused = False - self._live_counter = 0 - self._poll_interval = 5000 # ms - self._active_categories: Set[str] = set(ALL_CATEGORIES) - self._active_severities: Set[str] = {"CRITICAL", "WARNING", "INFO"} - self._search_text = "" - self._hist_hours = 24 - - # Build - self._apply_tree_style() - self._build_ui() - - # Worker - t = threading.Thread(target=self._worker_loop, daemon=True) - t.start() - - # Schedule - self.after(100, self._process_queue) - self.after(500, self._load_historical) - self.after(2000, self._start_live_poll) - - def _apply_tree_style(self): - style = ttk.Style() - style.theme_use("clam") - style.configure("Treeview", - background=TREE_BG, foreground=C_WHITE, fieldbackground=TREE_BG, - font=("Segoe UI", 9), rowheight=22, borderwidth=0) - style.configure("Treeview.Heading", - background="#333333", foreground=C_WHITE, - font=("Segoe UI", 9, "bold"), borderwidth=1, relief="flat") - style.map("Treeview", - background=[('selected', '#1f538d')], - foreground=[('selected', C_WHITE)]) - style.map("Treeview.Heading", - background=[('active', '#444444')]) - - # ── Build UI ── - - def _build_ui(self): - self.pack(fill="both", expand=True) - - # Admin banner - if not self.engine._admin: - banner = ctk.CTkFrame(self, fg_color="#442200", height=30) - banner.pack(fill="x", padx=6, pady=(6, 0)) - ctk.CTkLabel(banner, - text="⚠ Running without admin — Security log events (Account, Logon, Security Policy) are unavailable. 
Run as Administrator for full access.", - font=("Segoe UI", 10), text_color=C_ORANGE).pack(padx=10, pady=4) - - # Notebook - self.nb = ctk.CTkTabview(self) - self.nb.pack(fill="both", expand=True, padx=6, pady=6) - - self.tab_spy = self.nb.add("Spy Check") - self.tab_accounts = self.nb.add("User Accounts") - self.tab_historical = self.nb.add("Historical Timeline") - self.tab_live = self.nb.add("Live Monitor") - self.tab_settings = self.nb.add("Settings") - - self._build_spy_check() - self._build_accounts() - self._build_historical() - self._build_live() - self._build_settings() - - # ── Category filter builder (shared) ── - - def _build_category_filters(self, parent, on_change) -> Dict[str, ctk.CTkButton]: - """Build category toggle buttons. Returns dict of category -> button.""" - frame = ctk.CTkFrame(parent, fg_color="transparent") - frame.pack(fill="x", padx=6, pady=(4, 2)) - - ctk.CTkLabel(frame, text="Filter:", font=("Segoe UI", 10), - text_color=C_GRAY).pack(side="left", padx=(0, 6)) - - buttons = {} - for cat in ALL_CATEGORIES: - icon = CATEGORY_ICONS.get(cat, "") - btn = ctk.CTkButton(frame, text=f"{icon} {cat}", width=90, height=26, - fg_color="#1f538d", hover_color="#2b6194", - font=("Segoe UI", 9), - command=lambda c=cat: self._toggle_category(c, buttons, on_change)) - btn.pack(side="left", padx=2) - buttons[cat] = btn - - # Severity filter - ctk.CTkLabel(frame, text=" Severity:", font=("Segoe UI", 10), - text_color=C_GRAY).pack(side="left", padx=(10, 4)) - - self._sev_buttons: Dict[str, ctk.CTkButton] = {} - sev_configs = [ - ("CRITICAL", C_RED, "#661111"), - ("WARNING", C_ORANGE, "#664400"), - ("INFO", C_GRAY, "#333333"), - ] - for sev, active_color, hover in sev_configs: - btn = ctk.CTkButton(frame, text=sev, width=70, height=26, - fg_color=active_color, hover_color=hover, - font=("Segoe UI", 9), - command=lambda s=sev: self._toggle_severity(s, on_change)) - btn.pack(side="left", padx=2) - self._sev_buttons[sev] = btn - - return buttons - - def 
_toggle_category(self, cat: str, buttons: Dict, on_change): - icon = CATEGORY_ICONS.get(cat, "") - if cat in self._active_categories: - self._active_categories.discard(cat) - buttons[cat].configure(fg_color="#2a2a2a", text=f"[OFF] {cat}", - text_color="#555555", border_width=1, - border_color="#444444") - else: - self._active_categories.add(cat) - buttons[cat].configure(fg_color="#1f538d", text=f"{icon} {cat}", - text_color=C_WHITE, border_width=0) - on_change() - - def _toggle_severity(self, sev: str, on_change): - colors = {"CRITICAL": C_RED, "WARNING": C_ORANGE, "INFO": C_GRAY} - if sev in self._active_severities: - self._active_severities.discard(sev) - self._sev_buttons[sev].configure(fg_color="#2a2a2a", text=f"[OFF] {sev}", - text_color="#555555", border_width=1, - border_color="#444444") - else: - self._active_severities.add(sev) - self._sev_buttons[sev].configure(fg_color=colors[sev], text=sev, - text_color=C_WHITE, border_width=0) - on_change() - - # ── Historical Timeline Tab ── - - # ── User Accounts Tab ── - - # ── Spy Check Tab ── - - def _build_spy_check(self): - f = self.tab_spy - scroll = ctk.CTkScrollableFrame(f) - scroll.pack(fill="both", expand=True, padx=6, pady=6) - - # Header - top = ctk.CTkFrame(scroll, fg_color="transparent") - top.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(top, text="🔍 Spy & Intrusion Check", - font=("Segoe UI", 16, "bold")).pack(side="left") - ctk.CTkButton(top, text="Run Scan", width=100, height=30, - fg_color="#1f538d", hover_color="#2b6194", - command=self._run_spy_check).pack(side="right") - self._spy_status = ctk.CTkLabel(top, text="", font=("Segoe UI", 10), - text_color=C_GRAY) - self._spy_status.pack(side="right", padx=10) - - # Results container - self._spy_results_frame = ctk.CTkFrame(scroll, fg_color="transparent") - self._spy_results_frame.pack(fill="x") - - # Initial message - ctk.CTkLabel(self._spy_results_frame, - text="Click 'Run Scan' to check for:\n\n" - " • Camera & Microphone access history (which apps, 
when)\n" - " • Remote access tools (TeamViewer, AnyDesk, RDP)\n" - " • Suspicious user accounts\n" - " • Screen lock settings\n" - " • Event log tampering\n" - " • Suspicious scheduled tasks", - font=("Segoe UI", 11), text_color=C_GRAY, - justify="left").pack(pady=30, padx=20, anchor="w") - - self.after(500, self._run_spy_check) - - def _run_spy_check(self): - self._spy_status.configure(text="Scanning...") - self.work_q.put_nowait(("spy_check",)) - - def _populate_spy_results(self, findings: List[Dict]): - """Build spy check result cards grouped by category.""" - for w in self._spy_results_frame.winfo_children(): - w.destroy() - - if not findings: - ctk.CTkLabel(self._spy_results_frame, text="No findings", - font=("Segoe UI", 12)).pack(pady=20) - return - - # Count severities - crits = sum(1 for f in findings if f["severity"] == "CRITICAL") - warns = sum(1 for f in findings if f["severity"] == "WARNING") - infos = sum(1 for f in findings if f["severity"] == "INFO") - - # Overall status banner - if crits > 0: - banner_color = "#441111" - banner_text = f"⚠ {crits} CRITICAL issue{'s' if crits != 1 else ''} found!" 
- text_color = C_RED - elif warns > 0: - banner_color = "#332200" - banner_text = f"⚠ {warns} warning{'s' if warns != 1 else ''} — review recommended" - text_color = C_ORANGE - else: - banner_color = "#112211" - banner_text = "✓ No spy indicators detected" - text_color = C_GREEN - - banner = ctk.CTkFrame(self._spy_results_frame, fg_color=banner_color, - corner_radius=8) - banner.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(banner, text=banner_text, font=("Segoe UI", 13, "bold"), - text_color=text_color).pack(padx=12, pady=8) - - # Group by category - categories = {} - for f in findings: - cat = f["category"] - categories.setdefault(cat, []).append(f) - - for cat, cat_findings in categories.items(): - # Category header - cat_frame = ctk.CTkFrame(self._spy_results_frame, fg_color=CARD_BG, - corner_radius=8) - cat_frame.pack(fill="x", pady=3) - - # Determine category status - cat_crits = sum(1 for f in cat_findings if f["severity"] == "CRITICAL") - cat_warns = sum(1 for f in cat_findings if f["severity"] == "WARNING") - if cat_crits: - status_icon = "🔴" - elif cat_warns: - status_icon = "🟡" - else: - status_icon = "🟢" - - hdr = ctk.CTkFrame(cat_frame, fg_color="transparent") - hdr.pack(fill="x", padx=10, pady=(6, 2)) - ctk.CTkLabel(hdr, text=f"{status_icon} {cat}", - font=("Segoe UI", 12, "bold")).pack(side="left") - - # Findings in this category - for finding in cat_findings: - row = ctk.CTkFrame(cat_frame, fg_color="transparent") - row.pack(fill="x", padx=10, pady=2) - - sev_color = SEV_COLORS.get(finding["severity"], C_WHITE) - icon = finding.get("icon", "•") - - ctk.CTkLabel(row, text=icon, font=("Segoe UI", 10), - width=25).pack(side="left") - ctk.CTkLabel(row, text=finding["title"], - font=("Segoe UI", 10, "bold"), - text_color=sev_color).pack(side="left", padx=(0, 8)) - - if finding.get("detail"): - detail_row = ctk.CTkFrame(cat_frame, fg_color="transparent") - detail_row.pack(fill="x", padx=(45, 10), pady=(0, 2)) - ctk.CTkLabel(detail_row, 
text=finding["detail"], - font=("Segoe UI", 9), text_color="#999999", - wraplength=700, justify="left").pack(anchor="w") - - # Pad bottom - ctk.CTkLabel(cat_frame, text="", font=("Segoe UI", 2)).pack() - - # Add "Fix Log Sizes" button if any fixable log findings - fixable_logs = [f for f in findings if f.get("fixable") == "log_size"] - if fixable_logs: - fix_frame = ctk.CTkFrame(self._spy_results_frame, fg_color="#1a2a3a", - corner_radius=8) - fix_frame.pack(fill="x", pady=6) - ctk.CTkLabel(fix_frame, text="🔧 Quick Fix Available", - font=("Segoe UI", 11, "bold"), - text_color=C_CYAN).pack(anchor="w", padx=10, pady=(6, 2)) - log_names = [f["fix_target"] for f in fixable_logs] - ctk.CTkLabel(fix_frame, - text=f"Increase {', '.join(log_names)} logs from 20 MB to 200 MB\n" - "This keeps ~6-12 months of history instead of ~3 days.", - font=("Segoe UI", 9), text_color=C_GRAY, - justify="left").pack(anchor="w", padx=10, pady=(0, 4)) - ctk.CTkButton(fix_frame, text="Fix Log Sizes (requires admin)", width=220, - height=30, fg_color="#226622", hover_color="#338833", - command=lambda logs=log_names: self._fix_log_sizes(logs) - ).pack(padx=10, pady=(0, 8)) - - self._spy_status.configure( - text=f"Done — {crits} critical, {warns} warnings, {infos} info") - - # ── User Accounts Tab ── - - def _build_accounts(self): - f = self.tab_accounts - scroll = ctk.CTkScrollableFrame(f) - scroll.pack(fill="both", expand=True, padx=6, pady=6) - - # Header - top = ctk.CTkFrame(scroll, fg_color="transparent") - top.pack(fill="x", pady=(0, 4)) - ctk.CTkLabel(top, text="👤 Local User Accounts", - font=("Segoe UI", 14, "bold")).pack(side="left") - ctk.CTkButton(top, text="Refresh", width=70, height=26, - command=self._load_accounts).pack(side="right") - ctk.CTkButton(top, text="Scan Account History", width=150, height=26, - fg_color="#1f538d", hover_color="#2b6194", - command=self._scan_account_history).pack(side="right", padx=4) - - self._accounts_frame = ctk.CTkFrame(scroll, 
fg_color="transparent") - self._accounts_frame.pack(fill="x") - - # Account history section (populated on scan) - self._acct_hist_frame = ctk.CTkFrame(scroll, fg_color="transparent") - self._acct_hist_frame.pack(fill="x", pady=(8, 0)) - - self.after(300, self._load_accounts) - - def _load_accounts(self): - self.work_q.put_nowait(("accounts",)) - - def _scan_account_history(self): - """Scan all account events from Security log (all time, needs admin).""" - if not self.engine._admin: - messagebox.showinfo("Admin Required", - "Scanning account history from Security log requires administrator privileges.\n\n" - "Right-click the app → Run as administrator.") - return - self.work_q.put_nowait(("account_history",)) - - def _populate_accounts(self, accounts: List[Dict]): - """Build account cards.""" - for w in self._accounts_frame.winfo_children(): - w.destroy() - - for acc in accounts: - # Determine if this is a notable account - is_system = acc.get("sid_note", "").startswith("Built-in") or \ - acc.get("sid_note", "").endswith("(system)") or \ - acc.get("sid_note", "").endswith("(Defender)") - is_enabled = acc.get("enabled", False) - - if is_system and not is_enabled: - border_color = "#333333" - dim = True - elif is_enabled: - border_color = C_GREEN - dim = False - else: - border_color = C_ORANGE - dim = False - - card = ctk.CTkFrame(self._accounts_frame, fg_color=CARD_BG, - corner_radius=8, border_width=2, - border_color=border_color) - card.pack(fill="x", pady=3) - - # Header row - hdr = ctk.CTkFrame(card, fg_color="transparent") - hdr.pack(fill="x", padx=10, pady=(6, 2)) - - name_text = acc.get("name", "?") - full_name = acc.get("full_name", "") - if full_name and full_name != name_text: - name_text += f" ({full_name})" - text_color = "#666666" if dim else C_WHITE - ctk.CTkLabel(hdr, text=name_text, font=("Segoe UI", 12, "bold"), - text_color=text_color).pack(side="left") - - status_text = "ACTIVE" if is_enabled else "DISABLED" - status_color = C_GREEN if is_enabled 
else "#666666" - ctk.CTkLabel(hdr, text=status_text, font=("Segoe UI", 10, "bold"), - text_color=status_color).pack(side="right") - - # Details grid - details = ctk.CTkFrame(card, fg_color="transparent") - details.pack(fill="x", padx=10, pady=(0, 6)) - - fields = [] - fields.append(("SID", acc.get("sid", ""))) - if acc.get("sid_note"): - fields.append(("SID Note", acc["sid_note"])) - fields.append(("Last Logon", acc.get("last_logon_str", acc.get("last_logon", "")) or "Never")) - fields.append(("Password Set", acc.get("password_last_set_str", acc.get("password_set", "")) or "Never")) - groups = acc.get("groups", []) - if groups: - fields.append(("Groups", ", ".join(groups))) - if acc.get("description"): - fields.append(("Description", acc["description"])) - if acc.get("expires"): - fields.append(("Expires", acc["expires"])) - - for i, (label, value) in enumerate(fields): - row_idx = i // 2 - col_idx = (i % 2) * 2 - lbl_color = "#555555" if dim else C_GRAY - val_color = "#555555" if dim else C_WHITE - ctk.CTkLabel(details, text=f"{label}:", font=("Segoe UI", 9, "bold"), - text_color=lbl_color, width=90, anchor="w" - ).grid(row=row_idx, column=col_idx, padx=(0, 4), pady=1, sticky="w") - ctk.CTkLabel(details, text=str(value)[:120], font=("Segoe UI", 9), - text_color=val_color - ).grid(row=row_idx, column=col_idx + 1, padx=(0, 20), pady=1, sticky="w") - - for c in range(4): - details.columnconfigure(c, weight=1 if c % 2 else 0) - - def _populate_account_history(self, events: List): - """Show account-related events from Security log.""" - for w in self._acct_hist_frame.winfo_children(): - w.destroy() - - if not events: - ctk.CTkLabel(self._acct_hist_frame, - text="No account events found in Security log.\n" - "Run as Administrator and click 'Scan Account History' to search.", - font=("Segoe UI", 10), text_color=C_GRAY).pack(pady=10) - return - - header = ctk.CTkFrame(self._acct_hist_frame, fg_color="transparent") - header.pack(fill="x", pady=(4, 2)) - 
ctk.CTkLabel(header, text=f"🔍 Account History — {len(events)} events found (all time)", - font=("Segoe UI", 12, "bold")).pack(side="left") - - # Treeview for account history - tree_frame = ctk.CTkFrame(self._acct_hist_frame) - tree_frame.pack(fill="x", pady=4) - - cols = ("time", "severity", "event", "details") - self._acct_hist_tree = ttk.Treeview(tree_frame, columns=cols, - show="headings", height=12) - self._acct_hist_tree.heading("time", text="Time") - self._acct_hist_tree.heading("severity", text="Severity") - self._acct_hist_tree.heading("event", text="Event") - self._acct_hist_tree.heading("details", text="Details") - self._acct_hist_tree.column("time", width=140, stretch=False) - self._acct_hist_tree.column("severity", width=70, stretch=False) - self._acct_hist_tree.column("event", width=200, stretch=False) - self._acct_hist_tree.column("details", width=500, stretch=True) - self._acct_hist_tree.tag_configure("CRITICAL", foreground=C_RED) - self._acct_hist_tree.tag_configure("WARNING", foreground=C_ORANGE) - self._acct_hist_tree.tag_configure("INFO", foreground="#cccccc") - - vsb = ttk.Scrollbar(tree_frame, orient="vertical", - command=self._acct_hist_tree.yview) - self._acct_hist_tree.configure(yscrollcommand=vsb.set) - self._acct_hist_tree.pack(side="left", fill="both", expand=True) - vsb.pack(side="right", fill="y") - - self._acct_hist_events = events - for e in events: - self._acct_hist_tree.insert("", "end", values=( - e.timestamp, e.severity, e.title, e.details[:200] - ), tags=(e.severity,)) - - self._acct_hist_tree.bind("", lambda ev: self._show_event_detail( - self._acct_hist_tree, self._acct_hist_events)) - - # ── Historical Timeline Tab ── - - def _build_historical(self): - f = self.tab_historical - - # Top bar: time range + search + export - top = ctk.CTkFrame(f, fg_color="transparent") - top.pack(fill="x", padx=6, pady=(4, 0)) - - ctk.CTkLabel(top, text="Range:", font=("Segoe UI", 10), - text_color=C_GRAY).pack(side="left") - self._range_buttons: 
Dict[int, ctk.CTkButton] = {} - for hours, label in [(24, "1d"), (168, "7d"), (720, "30d"), (0, "All")]: - btn = ctk.CTkButton(top, text=label, width=45, height=26, - fg_color="#1f538d" if hours == 24 else "#444444", - hover_color="#2b6194", font=("Segoe UI", 9), - command=lambda h=hours: self._set_range(h)) - btn.pack(side="left", padx=2) - self._range_buttons[hours] = btn - - self._hist_search = ctk.CTkEntry(top, placeholder_text="Search events...", width=220) - self._hist_search.pack(side="left", padx=(12, 4)) - self._hist_search.bind("", lambda e: self._filter_historical()) - - self._hist_count = ctk.CTkLabel(top, text="0 events", font=("Segoe UI", 10), - text_color=C_GRAY) - self._hist_count.pack(side="left", padx=10) - - self._hist_progress = ctk.CTkLabel(top, text="", font=("Segoe UI", 9), - text_color=C_CYAN) - self._hist_progress.pack(side="left", padx=4) - - ctk.CTkButton(top, text="Refresh", width=70, height=26, - command=self._load_historical).pack(side="right", padx=4) - ctk.CTkButton(top, text="Export", width=60, height=26, - fg_color="#444444", hover_color="#555555", - command=self._export_historical).pack(side="right", padx=2) - - # Category filters - self._hist_cat_buttons = self._build_category_filters(f, self._filter_historical) - - # Treeview - tree_frame = ctk.CTkFrame(f) - tree_frame.pack(fill="both", expand=True, padx=6, pady=4) - - cols = ("time", "category", "severity", "event_id", "title", "details") - headers = ("Time", "Category", "Severity", "ID", "Event", "Details") - widths = (140, 80, 70, 45, 200, 450) - - self.hist_tree = ttk.Treeview(tree_frame, columns=cols, show="headings", - selectmode="browse") - for col, hdr, w in zip(cols, headers, widths): - self.hist_tree.heading(col, text=hdr) - self.hist_tree.column(col, width=w, stretch=(col == "details"), - minwidth=35) - - self.hist_tree.tag_configure("CRITICAL", foreground=C_RED) - self.hist_tree.tag_configure("WARNING", foreground=C_ORANGE) - self.hist_tree.tag_configure("INFO", 
foreground="#cccccc") - - vsb = ttk.Scrollbar(tree_frame, orient="vertical", command=self.hist_tree.yview) - self.hist_tree.configure(yscrollcommand=vsb.set) - self.hist_tree.pack(side="left", fill="both", expand=True) - vsb.pack(side="right", fill="y") - - self.hist_tree.bind("", self._show_event_detail_hist) - - def _set_range(self, hours: int): - self._hist_hours = hours - for h, btn in self._range_buttons.items(): - btn.configure(fg_color="#1f538d" if h == hours else "#444444") - self._load_historical() - - def _load_historical(self): - hours = self._hist_hours if self._hist_hours > 0 else 8760 # "All" = 1 year - self._hist_progress.configure(text="Loading...") - self.work_q.put_nowait(("historical", hours, set(self._active_categories))) - - def _filter_historical(self): - """Apply category, severity, and text filters to historical events.""" - self._search_text = self._hist_search.get().lower() if hasattr(self, '_hist_search') else "" - self._populate_tree(self.hist_tree, self._hist_events, - self._active_categories, self._active_severities, - self._search_text) - - def _populate_tree(self, tree: ttk.Treeview, events: List[ParsedEvent], - categories: Set[str], severities: Set[str], - search: str = ""): - """Populate a treeview with filtered events.""" - tree.delete(*tree.get_children()) - count = 0 - for e in events: - if e.category not in categories: - continue - if e.severity not in severities: - continue - if search: - searchable = f"{e.title} {e.details} {e.user} {e.category}".lower() - if search not in searchable: - continue - - tree.insert("", "end", values=( - e.timestamp, e.category, e.severity, - e.event_id, e.title, e.details[:200] - ), tags=(e.severity,)) - count += 1 - if count >= 5000: - break - - if tree == self.hist_tree: - self._hist_count.configure(text=f"{count} events") - - def _export_historical(self): - path = filedialog.asksaveasfilename( - defaultextension=".json", - filetypes=[("JSON", "*.json")], - 
initialfile=f"activity_log_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json" - ) - if not path: - return - try: - # Export filtered events - filtered = [e for e in self._hist_events - if e.category in self._active_categories - and e.severity in self._active_severities] - self.engine.export_events(filtered, path) - messagebox.showinfo("Export", f"Exported {len(filtered)} events to:\n{path}") - except Exception as e: - messagebox.showerror("Export Error", str(e)) - - def _show_event_detail_hist(self, event): - self._show_event_detail(self.hist_tree, self._hist_events) - - def _show_event_detail(self, tree: ttk.Treeview, events: List[ParsedEvent]): - sel = tree.selection() - if not sel: - return - vals = tree.item(sel[0], "values") - # Find matching event - ts, cat, sev, eid = vals[0], vals[1], vals[2], vals[3] - match = None - for e in events: - if e.timestamp == ts and str(e.event_id) == str(eid) and e.category == cat: - match = e - break - if not match: - return - - # Detail popup - popup = ctk.CTkToplevel(self) - popup.title("Event Detail") - popup.geometry("700x550") - popup.transient(self.parent) - - scroll = ctk.CTkScrollableFrame(popup) - scroll.pack(fill="both", expand=True, padx=10, pady=10) - - # Header with severity badge - hdr = ctk.CTkFrame(scroll, fg_color=CARD_BG, corner_radius=8) - hdr.pack(fill="x", pady=(0, 8)) - title_row = ctk.CTkFrame(hdr, fg_color="transparent") - title_row.pack(fill="x", padx=10, pady=6) - icon = CATEGORY_ICONS.get(match.category, "") - ctk.CTkLabel(title_row, text=f"{icon} {match.title}", - font=("Segoe UI", 14, "bold")).pack(side="left") - sev_color = SEV_COLORS.get(match.severity, C_WHITE) - ctk.CTkLabel(title_row, text=match.severity, - font=("Segoe UI", 12, "bold"), - text_color=sev_color).pack(side="right") - - # Summary line - if match.details: - summary = ctk.CTkFrame(scroll, fg_color="#1a2a1a" if match.severity == "INFO" - else "#2a1a1a", corner_radius=8) - summary.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(summary, 
text=match.details, - font=("Segoe UI", 11), wraplength=600, - justify="left").pack(padx=10, pady=8) - - # Core info - info = ctk.CTkFrame(scroll, fg_color=CARD_BG, corner_radius=8) - info.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(info, text="Event Info", font=("Segoe UI", 11, "bold"), - text_color=C_CYAN).pack(anchor="w", padx=10, pady=(6, 2)) - core_fields = [ - ("Time", match.timestamp), - ("Event ID", str(match.event_id)), - ("Category", match.category), - ("Log Source", match.log_source), - ("Computer", match.computer), - ] - for label, value in core_fields: - if not value: - continue - row = ctk.CTkFrame(info, fg_color="transparent") - row.pack(fill="x", padx=10, pady=1) - ctk.CTkLabel(row, text=f"{label}:", font=("Segoe UI", 10, "bold"), - text_color=C_GRAY, width=100, anchor="w").pack(side="left") - ctk.CTkLabel(row, text=value, font=("Segoe UI", 10), - wraplength=500, justify="left").pack(side="left") - # Pad bottom - ctk.CTkLabel(info, text="", font=("Segoe UI", 2)).pack() - - # Parsed data fields — the meaningful extracted data - if match.parsed_data: - data_frame = ctk.CTkFrame(scroll, fg_color=CARD_BG, corner_radius=8) - data_frame.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(data_frame, text="Event Data (Parsed)", - font=("Segoe UI", 11, "bold"), - text_color=C_CYAN).pack(anchor="w", padx=10, pady=(6, 2)) - - # Friendly labels for common field names - friendly_names = { - "SubjectUserName": "Acting User", - "SubjectDomainName": "Acting Domain", - "SubjectUserSid": "Acting User SID", - "TargetUserName": "Target Account", - "TargetDomainName": "Target Domain", - "TargetUserSid": "Target SID", - "LogonType": "Logon Type", - "IpAddress": "Source IP", - "IpPort": "Source Port", - "WorkstationName": "Workstation", - "DeviceDescription": "Device Name", - "DeviceId": "Device ID", - "ClassName": "Device Class", - "ClassId": "Class ID", - "VendorIds": "Vendor", - "CompatibleIds": "Compatible IDs", - "ServiceName": "Service Name", - "ImagePath": "Executable 
Path", - "ServiceType": "Service Type", - "StartType": "Start Type", - "AccountName": "Account Name", - "RuleName": "Firewall Rule", - "updateTitle": "Update Name", - "MemberName": "Group Member", - "MemberSid": "Member SID", - "OldTargetUserName": "Old Username", - "NewTargetUserName": "New Username", - "FailureReason": "Failure Reason", - "Status": "Status Code", - "SubStatus": "Sub-Status", - "DriverName": "Driver Name", - "DriverProvider": "Driver Provider", - "LocationInformation": "Location", - } - - # Skip noisy/technical fields - skip_fields = {"SubjectLogonId", "Keywords", "PrivilegeList", - "RestrictedAdminMode", "VirtualAccount", - "TransmittedServices", "LmPackageName", - "KeyLength", "ProcessId", "ProcessName", - "ElevatedToken", "TargetLinkedLogonId", - "TargetOutboundUserName", "TargetOutboundDomainName", - "TargetLogonId", "LogonGuid", "LogonProcessName", - "AuthenticationPackageName", "ImpersonationLevel"} - - for key, value in match.parsed_data.items(): - if key in skip_fields: - continue - if not value or value == "-": - continue - # Truncate long values - display_val = value[:300] - friendly = friendly_names.get(key, key) - - row = ctk.CTkFrame(data_frame, fg_color="transparent") - row.pack(fill="x", padx=10, pady=1) - ctk.CTkLabel(row, text=f"{friendly}:", font=("Segoe UI", 10, "bold"), - text_color=C_GRAY, width=130, anchor="w").pack(side="left") - ctk.CTkLabel(row, text=display_val, font=("Segoe UI", 10), - wraplength=450, justify="left").pack(side="left", fill="x") - - ctk.CTkLabel(data_frame, text="", font=("Segoe UI", 2)).pack() - - # Raw XML — collapsed by default - if match.raw_xml: - xml_frame = ctk.CTkFrame(scroll, fg_color=CARD_BG, corner_radius=8) - xml_frame.pack(fill="x", pady=(0, 8)) - - xml_header = ctk.CTkFrame(xml_frame, fg_color="transparent") - xml_header.pack(fill="x", padx=10, pady=(6, 2)) - ctk.CTkLabel(xml_header, text="Raw XML", - font=("Segoe UI", 10, "bold"), - text_color="#555555").pack(side="left") - - xml_container 
= ctk.CTkFrame(xml_frame, fg_color="transparent") - xml_visible = [False] - - def toggle_xml(): - if xml_visible[0]: - xml_container.pack_forget() - show_btn.configure(text="Show XML") - xml_visible[0] = False - else: - xml_container.pack(fill="x", padx=10, pady=(0, 8)) - xml_visible[0] = True - show_btn.configure(text="Hide XML") - # Lazy-load XML content - if not xml_container.winfo_children(): - xml_text = ctk.CTkTextbox(xml_container, height=150, - font=("Consolas", 8), fg_color="#1a1a1a") - xml_text.pack(fill="x") - try: - from xml.dom.minidom import parseString - pretty = parseString(match.raw_xml).toprettyxml(indent=" ") - lines = pretty.split("\n") - if lines and lines[0].startswith("", lambda e: self._filter_live()) - - # Category filters (reuse builder) - self._live_cat_buttons = self._build_category_filters(f, self._filter_live) - - # Treeview - tree_frame = ctk.CTkFrame(f) - tree_frame.pack(fill="both", expand=True, padx=6, pady=4) - - cols = ("time", "category", "severity", "event_id", "title", "details") - headers = ("Time", "Category", "Severity", "ID", "Event", "Details") - widths = (140, 80, 70, 45, 200, 450) - - self.live_tree = ttk.Treeview(tree_frame, columns=cols, show="headings", - selectmode="browse") - for col, hdr, w in zip(cols, headers, widths): - self.live_tree.heading(col, text=hdr) - self.live_tree.column(col, width=w, stretch=(col == "details"), - minwidth=35) - - self.live_tree.tag_configure("CRITICAL", foreground=C_RED) - self.live_tree.tag_configure("WARNING", foreground=C_ORANGE) - self.live_tree.tag_configure("INFO", foreground="#cccccc") - - vsb = ttk.Scrollbar(tree_frame, orient="vertical", command=self.live_tree.yview) - self.live_tree.configure(yscrollcommand=vsb.set) - self.live_tree.pack(side="left", fill="both", expand=True) - vsb.pack(side="right", fill="y") - - self.live_tree.bind("", self._show_event_detail_live) - - def _toggle_live_pause(self): - self._live_paused = not self._live_paused - if self._live_paused: - 
self._pause_btn.configure(text="Resume", fg_color="#226622") - self._live_status.configure(text="⏸ PAUSED", text_color=C_YELLOW) - else: - self._pause_btn.configure(text="Pause", fg_color="#664400") - self._live_status.configure(text="● MONITORING", text_color=C_GREEN) - - def _clear_live(self): - self._live_events.clear() - self._live_counter = 0 - self.live_tree.delete(*self.live_tree.get_children()) - self._live_count_lbl.configure(text="0 events captured") - - def _filter_live(self): - search = self._live_search.get().lower() if hasattr(self, '_live_search') else "" - self._populate_tree(self.live_tree, self._live_events, - self._active_categories, self._active_severities, search) - - def _export_live(self): - path = filedialog.asksaveasfilename( - defaultextension=".json", - filetypes=[("JSON", "*.json")], - initialfile=f"live_activity_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json" - ) - if not path: - return - try: - self.engine.export_events(self._live_events, path) - messagebox.showinfo("Export", f"Exported {len(self._live_events)} events to:\n{path}") - except Exception as e: - messagebox.showerror("Export Error", str(e)) - - def _show_event_detail_live(self, event): - self._show_event_detail(self.live_tree, self._live_events) - - def _start_live_poll(self): - if not self.running: - return - if not self._live_paused: - self.work_q.put_nowait(("live", set(self._active_categories))) - self.after(self._poll_interval, self._start_live_poll) - - # ── Settings Tab ── - - def _build_settings(self): - f = self.tab_settings - scroll = ctk.CTkScrollableFrame(f) - scroll.pack(fill="both", expand=True, padx=6, pady=6) - - # Poll interval - poll_card = ctk.CTkFrame(scroll, fg_color=CARD_BG, corner_radius=8) - poll_card.pack(fill="x", pady=4) - ctk.CTkLabel(poll_card, text="Live Monitor Settings", - font=("Segoe UI", 11, "bold")).pack(anchor="w", padx=10, pady=(6, 4)) - - row = ctk.CTkFrame(poll_card, fg_color="transparent") - row.pack(fill="x", padx=10, pady=(0, 8)) 
- ctk.CTkLabel(row, text="Poll interval:", font=("Segoe UI", 10)).pack(side="left") - self._poll_lbl = ctk.CTkLabel(row, text="5s", font=("Segoe UI", 10, "bold"), - text_color=C_CYAN) - self._poll_lbl.pack(side="right", padx=10) - poll_slider = ctk.CTkSlider(row, from_=3, to=15, number_of_steps=12, - command=lambda v: ( - self._poll_lbl.configure(text=f"{int(v)}s"), - setattr(self, '_poll_interval', int(v) * 1000))) - poll_slider.set(5) - poll_slider.pack(side="right", padx=10, fill="x", expand=True) - - # Admin status - admin_card = ctk.CTkFrame(scroll, fg_color=CARD_BG, corner_radius=8) - admin_card.pack(fill="x", pady=4) - ctk.CTkLabel(admin_card, text="Admin Status", - font=("Segoe UI", 11, "bold")).pack(anchor="w", padx=10, pady=(6, 4)) - status = "✓ Running as Administrator" if self.engine._admin else "⚠ Not Administrator" - color = C_GREEN if self.engine._admin else C_ORANGE - ctk.CTkLabel(admin_card, text=status, font=("Segoe UI", 10), - text_color=color).pack(anchor="w", padx=10, pady=(0, 4)) - if not self.engine._admin: - ctk.CTkLabel(admin_card, - text="Security log events (Account, Logon, Security Policy) require admin.\n" - "Device, System, and Software events work without admin.", - font=("Segoe UI", 9), text_color=C_GRAY, - justify="left").pack(anchor="w", padx=10, pady=(0, 8)) - - # Event reference - ref_card = ctk.CTkFrame(scroll, fg_color=CARD_BG, corner_radius=8) - ref_card.pack(fill="x", pady=4) - ctk.CTkLabel(ref_card, text="Monitored Events", - font=("Segoe UI", 11, "bold")).pack(anchor="w", padx=10, pady=(6, 4)) - - for cat in ALL_CATEGORIES: - icon = CATEGORY_ICONS.get(cat, "") - cat_events = [e for e in EVENT_DEFS.values() if e.category == cat] - events_str = ", ".join(f"{e.event_id} ({e.title})" for e in cat_events) - row = ctk.CTkFrame(ref_card, fg_color="transparent") - row.pack(fill="x", padx=10, pady=1) - ctk.CTkLabel(row, text=f"{icon} {cat}:", font=("Segoe UI", 9, "bold"), - text_color=C_CYAN, width=90, anchor="w").pack(side="left") 
- ctk.CTkLabel(row, text=events_str, font=("Segoe UI", 8), - text_color=C_GRAY, wraplength=800, - justify="left").pack(side="left", fill="x", expand=True) - - ctk.CTkLabel(ref_card, text="", font=("Segoe UI", 1)).pack(pady=2) - - # ── Worker ── - - def _worker_loop(self): - while self.running: - try: - job = self.work_q.get(timeout=0.3) - except queue.Empty: - continue - try: - if job[0] == "historical": - hours, cats = job[1], job[2] - def progress_cb(msg, pct): - self.result_q.put(("hist_progress", msg)) - events = self.engine.query_events(hours, cats, callback=progress_cb) - self.result_q.put(("historical", events)) - elif job[0] == "live": - cats = job[1] - events = self.engine.query_new_events(cats) - if events: - self.result_q.put(("live", events)) - elif job[0] == "spy_check": - findings = self.engine.run_spy_check() - self.result_q.put(("spy_check", findings)) - elif job[0] == "accounts": - accounts = self.engine.get_user_accounts() - self.result_q.put(("accounts", accounts)) - elif job[0] == "account_history": - events = self.engine.get_account_events_all_time() - self.result_q.put(("account_history", events)) - except Exception: - pass - - def _process_queue(self): - try: - while True: - try: - msg = self.result_q.get_nowait() - except queue.Empty: - break - if msg[0] == "spy_check": - self._populate_spy_results(msg[1]) - elif msg[0] == "accounts": - self._populate_accounts(msg[1]) - elif msg[0] == "account_history": - self._populate_account_history(msg[1]) - elif msg[0] == "historical": - self._hist_events = msg[1] - self._filter_historical() - self._hist_progress.configure(text="") - elif msg[0] == "hist_progress": - self._hist_progress.configure(text=msg[1]) - elif msg[0] == "live": - new_events = msg[1] - self._live_events = new_events + self._live_events - # Cap at 5000 - if len(self._live_events) > 5000: - self._live_events = self._live_events[:5000] - self._live_counter += len(new_events) - self._live_count_lbl.configure( - 
text=f"{self._live_counter} events captured") - # Insert at top of tree - search = self._live_search.get().lower() if hasattr(self, '_live_search') else "" - for e in reversed(new_events): - if e.category not in self._active_categories: - continue - if e.severity not in self._active_severities: - continue - if search: - searchable = f"{e.title} {e.details} {e.user} {e.category}".lower() - if search not in searchable: - continue - self.live_tree.insert("", 0, values=( - e.timestamp, e.category, e.severity, - e.event_id, e.title, e.details[:200] - ), tags=(e.severity,)) - # Trim tree to 5000 - children = self.live_tree.get_children() - if len(children) > 5000: - for iid in children[5000:]: - self.live_tree.delete(iid) - except Exception: - pass - if self.running: - self.after(100, self._process_queue) - - # ── Cleanup ── - - def _fix_log_sizes(self, log_names: List[str]): - """Increase event log sizes to 200 MB.""" - if not is_admin(): - messagebox.showinfo("Admin Required", - "Changing event log sizes requires administrator privileges.\n\n" - "Right-click the app and select 'Run as administrator', then try again.") - return - - results = [] - target_size = 209715200 # 200 MB - for log in log_names: - rc, _, err = safe_run(["wevtutil", "sl", log, f"/ms:{target_size}"], timeout=10) - if rc == 0: - results.append(f" {log}: increased to 200 MB") - else: - results.append(f" {log}: FAILED — {err[:100]}") - - messagebox.showinfo("Log Sizes Updated", - "Results:\n\n" + "\n".join(results) + - "\n\nEvent logs will now keep ~6-12 months of history.") - - # Re-run spy check to update the display - self._run_spy_check() - - def force_stop(self): - self.running = False - try: - self.parent.destroy() - except Exception: - pass - - -# ───────────────────────────────────────────── -# Entry point -# ───────────────────────────────────────────── - -def run_tool(): - try: - if tk._default_root is None: - root = ctk.CTk() - else: - root = ctk.CTkToplevel() - - app = App(root) - 
root.protocol("WM_DELETE_WINDOW", app.force_stop) - - root.update_idletasks() - w, h = 1300, 750 - x = (root.winfo_screenwidth() - w) // 2 - y = (root.winfo_screenheight() - h) // 2 - root.geometry(f"{w}x{h}+{x}+{y}") - root.lift() - root.focus_force() - - if tk._default_root == root: - root.mainloop() - except Exception as e: - messagebox.showerror(TOOL_NAME, f"Startup error:\n{e}") - - -if __name__ == "__main__": - ctk.set_appearance_mode("dark") - ctk.set_default_color_theme("blue") - root = ctk.CTk() - app = App(root) - root.protocol("WM_DELETE_WINDOW", app.force_stop) - root.mainloop() diff --git a/portable/build/Account_Activity_Monitor.spec b/portable/build/Account_Activity_Monitor.spec new file mode 100644 index 0000000..497d4ae --- /dev/null +++ b/portable/build/Account_Activity_Monitor.spec @@ -0,0 +1,51 @@ +# -*- mode: python ; coding: utf-8 -*- +import os +from PyInstaller.utils.hooks import collect_all + +datas = [] +binaries = [] +hiddenimports = [ + 'customtkinter', 'psutil', + 'tools.account_activity_monitor', + 'tools._common.threadsafe', +] +tmp_ret = collect_all('customtkinter') +datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] + + +a = Analysis( + ['..\\Account_Activity_Monitor_Portable.py'], + pathex=[os.path.abspath(os.path.join(SPECPATH, '..', '..'))], + binaries=binaries, + datas=datas, + hiddenimports=hiddenimports, + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.datas, + [], + name='Account_Activity_Monitor', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=False, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, + uac_admin=True, +) diff --git a/portable/build/Network_Intrusion_Detector_Pro.spec 
b/portable/build/Network_Intrusion_Detector_Pro.spec new file mode 100644 index 0000000..e6daacb --- /dev/null +++ b/portable/build/Network_Intrusion_Detector_Pro.spec @@ -0,0 +1,53 @@ +# -*- mode: python ; coding: utf-8 -*- +import os +from PyInstaller.utils.hooks import collect_all + +datas = [] +binaries = [] +hiddenimports = [ + 'customtkinter', 'psutil', 'requests', 'winreg', + 'scapy', 'scapy.all', 'scapy.layers.l2', 'scapy.layers.inet', + 'tools.network_intrusion_detector_pro', + 'tools._common.threadsafe', +] +tmp_ret = collect_all('customtkinter') +datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] +tmp_ret = collect_all('scapy') +datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] + + +a = Analysis( + ['..\\Network_Intrusion_Detector_Portable.py'], + pathex=[os.path.abspath(os.path.join(SPECPATH, '..', '..'))], + binaries=binaries, + datas=datas, + hiddenimports=hiddenimports, + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.datas, + [], + name='Network_Intrusion_Detector_Pro', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=False, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) diff --git a/portable/build/Network_Stability_Monitor_Pro.spec b/portable/build/Network_Stability_Monitor_Pro.spec new file mode 100644 index 0000000..2ec1b42 --- /dev/null +++ b/portable/build/Network_Stability_Monitor_Pro.spec @@ -0,0 +1,50 @@ +# -*- mode: python ; coding: utf-8 -*- +import os +from PyInstaller.utils.hooks import collect_all + +datas = [] +binaries = [] +hiddenimports = [ + 'customtkinter', 'psutil', 'requests', + 'tools.network_stability_monitor', + 'tools._common.threadsafe', +] +tmp_ret = collect_all('customtkinter') +datas += 
tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] + + +a = Analysis( + ['..\\Network_Stability_Monitor_Portable.py'], + pathex=[os.path.abspath(os.path.join(SPECPATH, '..', '..'))], + binaries=binaries, + datas=datas, + hiddenimports=hiddenimports, + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.datas, + [], + name='Network_Stability_Monitor_Pro', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=False, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) diff --git a/portable/build/Security_Audit.spec b/portable/build/Security_Audit.spec new file mode 100644 index 0000000..b569df5 --- /dev/null +++ b/portable/build/Security_Audit.spec @@ -0,0 +1,50 @@ +# -*- mode: python ; coding: utf-8 -*- +import os +from PyInstaller.utils.hooks import collect_all + +datas = [] +binaries = [] +hiddenimports = [ + 'customtkinter', 'psutil', + 'tools.security_audit', +] +tmp_ret = collect_all('customtkinter') +datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] + + +a = Analysis( + ['..\\Security_Audit_Portable.py'], + pathex=[os.path.abspath(os.path.join(SPECPATH, '..', '..'))], + binaries=binaries, + datas=datas, + hiddenimports=hiddenimports, + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.datas, + [], + name='Security_Audit', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=False, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, + uac_admin=True, +) diff --git 
a/portable/network_intrusion_detector_pro.py b/portable/network_intrusion_detector_pro.py deleted file mode 100644 index 2f2d77b..0000000 --- a/portable/network_intrusion_detector_pro.py +++ /dev/null @@ -1,3063 +0,0 @@ -""" -network_intrusion_detector_pro.py - -NETWORK INTRUSION DETECTOR PRO (Windows 10, Home WiFi) -- Active scanning: finds devices on your LAN (ARP scan + ping sweep fallback) -- Passive monitoring: watches for ARP spoofing/MITM indicators and suspicious scan patterns -- Outbound visibility: shows active network connections and processes -- Trust list (allowlist) + baseline persistence -- Advanced threat detection: Flipper Zero/BadUSB, Tor activity, audio spying, network recon -- 5-level connection classification: SAFE / KNOWN / UNKNOWN / SUSPICIOUS / DANGEROUS -- Threats tab with Kill/Block/Investigate actions -- Double-click connection → open file location in Explorer -- UX/perf improvements: - - Filters (category + severity) - - Alert de-duplication + rate limiting - - Clear alerts button - - Multicast/broadcast MAC filtered - -IMPORTANT WINDOWS NOTES -- For full passive packet detection (ARP spoofing + scan detection), you need Npcap: - Install Npcap (WinPcap-compatible mode recommended). -- Run this tool "as Administrator" for best results. 
- -Dependencies: - pip install psutil -Optional: - pip install scapy - pip install watchdog - pip install pyshark - pip install python-nmap - pip install requests -""" - -import os -import re -import sys -import json -import time -import queue -import socket -import random -import threading -import subprocess -import hashlib -import fnmatch -import requests -from datetime import datetime -from typing import Dict, List, Optional, Tuple, Set - -import tkinter as tk -from tkinter import ttk, messagebox, filedialog -import customtkinter as ctk - -import psutil - -try: - from watchdog.observers import Observer - from watchdog.events import FileSystemEventHandler - HAS_WATCHDOG = True -except ImportError: - HAS_WATCHDOG = False - -TOOL_NAME = "Network Intrusion Detector Pro" - -# ───────────────────────────────────────────────────────────────────────────── -# Scapy lazy loader (SAFE) -# ───────────────────────────────────────────────────────────────────────────── - -HAS_SCAPY = False -ARP = None -Ether = None -srp = None -sniff = None - - -def try_load_scapy(): - global HAS_SCAPY, ARP, Ether, srp, sniff - if HAS_SCAPY: - return True - try: - import importlib - scapy_all = importlib.import_module("scapy.all") - ARP = scapy_all.ARP - Ether = scapy_all.Ether - srp = scapy_all.srp - sniff = scapy_all.sniff - HAS_SCAPY = True - return True - except Exception: - HAS_SCAPY = False - return False - - -# ───────────────────────────────────────────────────────────────────────────── -# Utilities -# ───────────────────────────────────────────────────────────────────────────── - -def now_ts() -> str: - return datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - -_CREATE_NO_WINDOW = 0x08000000 - - -def safe_run(cmd: List[str], timeout: int = 10) -> Tuple[int, str, str]: - try: - cp = subprocess.run( - cmd, - capture_output=True, - text=True, - errors="replace", - timeout=timeout, - shell=False, - creationflags=_CREATE_NO_WINDOW, - ) - return cp.returncode, cp.stdout, cp.stderr - except 
Exception as e: - return 1, "", str(e) - - -# ───────────────────────────────────────────────────────────────────────────── -# IP Geolocation & 5-level Connection Classification -# ───────────────────────────────────────────────────────────────────────────── - -_ip_geo_cache: Dict[str, Dict] = {} - -# Countries that warrant SUSPICIOUS classification (user may legitimately use -# VPN exit nodes here — we flag but do NOT auto-escalate to DANGEROUS) -SUSPICIOUS_COUNTRIES: Set[str] = { - "Russia", "China", "North Korea", "Iran", - "Belarus", "Syria", "Cuba", "Venezuela", -} - -# Ports that warrant SUSPICIOUS classification -SUSPICIOUS_PORTS: Set[int] = { - 9050, 9150, # Tor SOCKS - 9001, 9030, 9040, # Tor relay / directory - 6667, 6668, 6669, # IRC (often botnet C2) - 1080, # SOCKS proxy - 4444, 5555, 31337, # Classic backdoor ports -} - -# Ports that warrant DANGEROUS classification -DANGEROUS_PORTS: Set[int] = { - # Tor (outbound confirms Tor circuit, stronger signal than just listening) - 9050, 9150, - # Known bad / exploit frameworks - 4444, 31337, -} - -KNOWN_SAFE_ORGS: Set[str] = { - "Microsoft", "Google", "Amazon", "Cloudflare", "Akamai", - "Fastly", "Facebook", "Apple", "GitHub", "GitLab", - "DigitalOcean", "Linode", "OVH", "Hetzner", "Twitch", - "Netflix", "Spotify", "Adobe", "Dropbox", "Zoom", -} - -TOR_PROCESSES: Set[str] = { - "tor.exe", "tor", "privoxy", "obfs4proxy", "meek-client", -} - -# ───────────────────────────────────────────────────────────────────────────── -# Port → human-readable service name + description -# ───────────────────────────────────────────────────────────────────────────── -PORT_SERVICES: Dict[int, Tuple[str, str]] = { - # (short_name, explanation) - 20: ("FTP-Data", "File Transfer Protocol – data channel"), - 21: ("FTP", "File Transfer Protocol – control channel"), - 22: ("SSH", "Secure Shell – remote terminal / SFTP"), - 23: ("Telnet", "Unencrypted remote terminal (legacy, insecure)"), - 25: ("SMTP", "Email sending (mail 
server)"), - 53: ("DNS", "Domain Name resolution"), - 67: ("DHCP", "IP address assignment (server)"), - 68: ("DHCP", "IP address assignment (client)"), - 80: ("HTTP", "Unencrypted web traffic"), - 110: ("POP3", "Email retrieval (legacy)"), - 119: ("NNTP", "Usenet / news groups"), - 123: ("NTP", "Network Time Protocol – clock sync"), - 135: ("RPC", "Windows Remote Procedure Call"), - 137: ("NetBIOS-NS", "Windows NetBIOS name service"), - 138: ("NetBIOS-DG", "Windows NetBIOS datagram service"), - 139: ("NetBIOS-SS", "Windows file/printer sharing (legacy)"), - 143: ("IMAP", "Email retrieval (modern)"), - 161: ("SNMP", "Network device monitoring"), - 389: ("LDAP", "Directory service / Active Directory"), - 443: ("HTTPS", "Encrypted web traffic (TLS)"), - 445: ("SMB", "Windows file sharing / named pipes — normal Windows IPC"), - 465: ("SMTPS", "Encrypted email sending"), - 500: ("IKE", "IPsec VPN key exchange"), - 514: ("Syslog", "System log forwarding"), - 587: ("SMTP-Sub", "Email submission (authenticated)"), - 631: ("IPP", "Internet Printing Protocol"), - 636: ("LDAPS", "Encrypted LDAP / Active Directory"), - 993: ("IMAPS", "Encrypted email retrieval"), - 995: ("POP3S", "Encrypted email retrieval (legacy)"), - 1080: ("SOCKS", "⚠ SOCKS proxy — can tunnel any traffic"), - 1194: ("OpenVPN", "OpenVPN encrypted tunnel"), - 1433: ("MSSQL", "Microsoft SQL Server"), - 1434: ("MSSQL-Mon", "Microsoft SQL Server monitor"), - 1701: ("L2TP", "Layer 2 VPN tunnelling"), - 1723: ("PPTP", "PPTP VPN (outdated, insecure)"), - 1900: ("SSDP/UPnP", "Device discovery on LAN"), - 2049: ("NFS", "Network File System"), - 3074: ("Xbox Live", "Xbox Live gaming service"), - 3306: ("MySQL", "MySQL database"), - 3389: ("RDP", "Windows Remote Desktop — ensure this is expected"), - 3478: ("STUN/TURN", "WebRTC relay / VoIP NAT traversal"), - 4444: ("Metasploit", "⛔ Classic Metasploit reverse shell port"), - 5000: ("UPnP/Dev", "Various dev servers / UPnP"), - 5353: ("mDNS", "Multicast DNS — local 
device discovery"), - 5355: ("LLMNR", "Link-Local Multicast Name Resolution"), - 5555: ("ADB/Alt", "⚠ Android Debug Bridge or backdoor"), - 5900: ("VNC", "Virtual Network Computing — remote desktop"), - 6667: ("IRC", "⚠ Internet Relay Chat — common botnet C2 channel"), - 6668: ("IRC", "⚠ Internet Relay Chat — common botnet C2 channel"), - 6669: ("IRC", "⚠ Internet Relay Chat — common botnet C2 channel"), - 7680: ("WUDO", "Windows Update Delivery Optimisation (peer)"), - 8080: ("HTTP-Alt", "Alternative HTTP / proxy"), - 8443: ("HTTPS-Alt", "Alternative HTTPS"), - 9050: ("Tor-SOCKS", "⛔ Tor SOCKS proxy — anonymous routing"), - 9150: ("Tor-Browser","⛔ Tor Browser SOCKS proxy"), - 9001: ("Tor-Relay", "⛔ Tor relay port"), - 9030: ("Tor-Dir", "⛔ Tor directory server"), - 27015: ("Steam", "Steam game server / matchmaking"), - 27036: ("Steam-IPC", "Steam local IPC"), - 31337: ("Back Orifice","⛔ Classic backdoor / hacking tool port"), - 49152: ("Ephemeral", "Dynamic / ephemeral Windows port"), -} - -def port_service(port: int) -> Tuple[str, str]: - """Return (short_name, description) for a port number.""" - if port in PORT_SERVICES: - return PORT_SERVICES[port] - if 49152 <= port <= 65535: - return ("Ephemeral", "Dynamic high port — assigned temporarily by Windows") - if 1024 <= port <= 49151: - return ("Registered", f"Registered port {port} — check if expected for this app") - return ("System", f"Well-known system port {port}") - -SUSPICIOUS_AUDIO_PROCESSES: Set[str] = { - "pjsua.exe", "sox.exe", "ffmpeg.exe", "sndrec32.exe", "soundrecorder.exe", -} - - -def get_ip_geolocation(ip: str) -> Dict[str, str]: - global _ip_geo_cache - if is_private_ip(ip): - return {"country": "Local", "region": "LAN", "city": "Private", "org": "", "trust": "safe"} - if ip in _ip_geo_cache: - return _ip_geo_cache[ip] - if len(_ip_geo_cache) > 2000: - _ip_geo_cache.clear() - try: - resp = requests.get( - f"http://ip-api.com/json/{ip}?fields=status,country,regionName,city,isp,org,as,query", - 
timeout=3, - ) - if resp.status_code == 200: - data = resp.json() - if data.get("status") == "success": - result = { - "country": data.get("country", "Unknown"), - "region": data.get("regionName", "Unknown"), - "city": data.get("city", "Unknown"), - "org": data.get("org", data.get("isp", "Unknown")), - "trust": "unknown", - } - _ip_geo_cache[ip] = result - return result - except Exception: - pass - result = {"country": "Unknown", "region": "", "city": "", "org": "", "trust": "unknown"} - _ip_geo_cache[ip] = result - return result - - -def is_private_ip(ip: str) -> bool: - try: - ip = ip.strip() - - # ── IPv6 loopback & link-local ────────────────────────────────────── - if ip in ("::1", "0:0:0:0:0:0:0:1"): # loopback - return True - il = ip.lower() - if il.startswith("fe80"): # link-local (fe80::/10) - return True - if il.startswith("fc") or il.startswith("fd"): # unique local (fc00::/7) - return True - if il in ("::", "0:0:0:0:0:0:0:0"): # unspecified - return True - # IPv4-mapped IPv6 e.g. ::ffff:192.168.1.1 - if il.startswith("::ffff:"): - ip = il[7:] # strip prefix, fall through to IPv4 check - - # ── IPv4 ──────────────────────────────────────────────────────────── - parts = ip.split(".") - if len(parts) != 4: - return False - first, second = int(parts[0]), int(parts[1]) - if first == 10: - return True - if first == 172 and 16 <= second <= 31: - return True - if first == 192 and second == 168: - return True - if first == 127: # loopback - return True - if first == 169 and second == 254: # APIPA / link-local - return True - return False - except Exception: - return False - - -def extract_remote_ip(raddr: str) -> str: - """Extract IP from 'ip:port' string — handles both IPv4 and IPv6.""" - if not raddr: - return raddr - # IPv6 addresses are stored as plain '::1', not '[::1]:port' by psutil, - # but the display string is built as f"{ip}:{port}" which could be ambiguous. 
- # rsplit on the last ':' works fine for IPv4 (192.168.1.1:80 → 192.168.1.1) - # and for psutil IPv6 strings (::1:445 → ::1 is wrong; use raddr.ip directly instead). - # This helper is only used for display parsing, not for psutil objects. - if ":" in raddr: - return raddr.rsplit(":", 1)[0] - return raddr - - -def classify_connection(ip: str, port: int, org: str, country: str, trusted_ips: Dict) -> str: - """ - 5-level classification. - Returns: 'safe' | 'known' | 'unknown' | 'suspicious' | 'dangerous' - """ - if is_private_ip(ip): - return "safe" - - # Explicit user trust → SAFE - if ip in trusted_ips: - return "safe" - - # DANGEROUS: confirmed bad ports (outbound) - if port in DANGEROUS_PORTS: - return "dangerous" - - # SUSPICIOUS: Tor relay ports, IRC, SOCKS, backdoor ports - if port in SUSPICIOUS_PORTS: - return "suspicious" - - # SUSPICIOUS: geopolitically sensitive country - if country in SUSPICIOUS_COUNTRIES: - return "suspicious" - - # KNOWN: well-known organisation - if org: - org_lower = org.lower() - for safe_org in KNOWN_SAFE_ORGS: - if safe_org.lower() in org_lower: - return "known" - - return "unknown" - - -# ───────────────────────────────────────────────────────────────────────────── -# IP Reputation Checker (VirusTotal + AbuseIPDB) -# ───────────────────────────────────────────────────────────────────────────── - -_API_KEYS_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "nid_api_keys.json") - -# Reputation indicators used in the connections table REP column -REP_CLEAN = "✓" -REP_SUSPICIOUS = "⚠" -REP_DANGEROUS = "✗" -REP_UNCHECKED = "—" -REP_PENDING = "…" - -REP_COLORS = { - REP_CLEAN: "#90EE90", - REP_SUSPICIOUS: "#FFA500", - REP_DANGEROUS: "#FF4444", - REP_UNCHECKED: "#888888", - REP_PENDING: "#888888", -} - - -class IPReputationChecker: - """Wraps VirusTotal v3 + AbuseIPDB v2 behind a single check_ip() call.""" - - # Rate-limit tracking - VT_MAX_PER_MIN = 4 - ABUSE_MAX_PER_DAY = 1000 - - def __init__(self): - self._cache: 
Dict[str, Dict] = {} - self._vt_key: str = "" - self._abuse_key: str = "" - self._vt_timestamps: List[float] = [] - self._abuse_day_count: int = 0 - self._abuse_day_start: float = time.time() - self._lock = threading.Lock() - self._load_keys() - - # ── Key management ────────────────────────────────────────────────────── - - def _load_keys(self): - try: - with open(_API_KEYS_PATH, "r") as f: - data = json.load(f) - self._vt_key = data.get("virustotal_key", "").strip() - self._abuse_key = data.get("abuseipdb_key", "").strip() - except Exception: - pass - - def save_keys(self, vt_key: str, abuse_key: str): - self._vt_key = vt_key.strip() - self._abuse_key = abuse_key.strip() - try: - with open(_API_KEYS_PATH, "w") as f: - json.dump({"virustotal_key": self._vt_key, "abuseipdb_key": self._abuse_key}, f, indent=2) - except Exception: - pass - - @property - def has_keys(self) -> bool: - return bool(self._vt_key or self._abuse_key) - - # ── Rate limiting ─────────────────────────────────────────────────────── - - def _vt_rate_ok(self) -> bool: - now = time.time() - self._vt_timestamps = [t for t in self._vt_timestamps if now - t < 60] - return len(self._vt_timestamps) < self.VT_MAX_PER_MIN - - def _abuse_rate_ok(self) -> bool: - now = time.time() - if now - self._abuse_day_start > 86400: - self._abuse_day_count = 0 - self._abuse_day_start = now - return self._abuse_day_count < self.ABUSE_MAX_PER_DAY - - # ── API calls ─────────────────────────────────────────────────────────── - - def _query_virustotal(self, ip: str) -> Dict: - if not self._vt_key or not self._vt_rate_ok(): - return {} - try: - resp = requests.get( - f"https://www.virustotal.com/api/v3/ip_addresses/{ip}", - headers={"x-apikey": self._vt_key}, - timeout=8, - ) - self._vt_timestamps.append(time.time()) - if resp.status_code == 200: - attrs = resp.json().get("data", {}).get("attributes", {}) - stats = attrs.get("last_analysis_stats", {}) - return { - "vt_malicious": int(stats.get("malicious", 0)), - 
"vt_suspicious": int(stats.get("suspicious", 0)), - "vt_harmless": int(stats.get("harmless", 0)), - "vt_undetected": int(stats.get("undetected", 0)), - "vt_reputation": int(attrs.get("reputation", 0)), - } - except Exception: - pass - return {} - - def _query_abuseipdb(self, ip: str) -> Dict: - if not self._abuse_key or not self._abuse_rate_ok(): - return {} - try: - resp = requests.get( - "https://api.abuseipdb.com/api/v2/check", - headers={"Key": self._abuse_key, "Accept": "application/json"}, - params={"ipAddress": ip, "maxAgeInDays": "90"}, - timeout=8, - ) - self._abuse_day_count += 1 - if resp.status_code == 200: - d = resp.json().get("data", {}) - return { - "abuse_score": int(d.get("abuseConfidenceScore", 0)), - "abuse_reports": int(d.get("totalReports", 0)), - "abuse_is_tor": bool(d.get("isTor", False)), - "abuse_usage": str(d.get("usageType", "")), - "abuse_domain": str(d.get("domain", "")), - "abuse_country": str(d.get("countryCode", "")), - } - except Exception: - pass - return {} - - # ── Main check ────────────────────────────────────────────────────────── - - def check_ip(self, ip: str, force: bool = False) -> Dict: - """ - Check an IP against VT + AbuseIPDB. Returns a combined dict. - Results are cached for the session (unless force=True). 
- """ - if is_private_ip(ip): - return {"rep": REP_CLEAN, "source": "private"} - - with self._lock: - if not force and ip in self._cache: - return self._cache[ip] - - vt = self._query_virustotal(ip) - ab = self._query_abuseipdb(ip) - - result = {**vt, **ab} - result["source"] = "+".join(filter(None, ["vt" if vt else "", "abuse" if ab else ""])) - result["checked_at"] = now_ts() - - # Derive reputation indicator - vt_mal = result.get("vt_malicious", 0) - abuse = result.get("abuse_score", 0) - - if vt_mal >= 3 or abuse >= 50: - result["rep"] = REP_DANGEROUS - elif vt_mal >= 1 or abuse >= 15: - result["rep"] = REP_SUSPICIOUS - else: - result["rep"] = REP_CLEAN - - with self._lock: - self._cache[ip] = result - return result - - def get_cached(self, ip: str) -> Optional[Dict]: - """Return cached result or None.""" - with self._lock: - return self._cache.get(ip) - - def reputation_upgrades_trust(self, ip: str, current_trust: str) -> str: - """ - Given an IP's reputation data, return the same or higher severity trust. - Never downgrades (e.g. safe → safe, unknown → dangerous). 
- """ - cached = self.get_cached(ip) - if not cached: - return current_trust - - trust_order = {"safe": 0, "known": 1, "unknown": 2, "suspicious": 3, "dangerous": 4} - current_level = trust_order.get(current_trust, 2) - - rep = cached.get("rep", REP_UNCHECKED) - if rep == REP_DANGEROUS: - new_level = max(current_level, 4) - elif rep == REP_SUSPICIOUS: - new_level = max(current_level, 3) - else: - # Clean reputation: optionally upgrade unknown → known - vt_harmless = cached.get("vt_harmless", 0) - if current_trust == "unknown" and vt_harmless >= 30: - new_level = max(current_level, 1) # → known - else: - new_level = current_level - - reverse = {v: k for k, v in trust_order.items()} - return reverse.get(new_level, current_trust) - - -# Global instance (created once, used by App and NetworkMonitor) -_reputation_checker: Optional[IPReputationChecker] = None - - -def get_reputation_checker() -> IPReputationChecker: - global _reputation_checker - if _reputation_checker is None: - _reputation_checker = IPReputationChecker() - return _reputation_checker - - -# ───────────────────────────────────────────────────────────────────────────── -# Firewall helper -# ───────────────────────────────────────────────────────────────────────────── - -def block_ip_firewall(ip: str, rule_name: str = None) -> bool: - """Block an IP using Windows Firewall via netsh""" - rule = rule_name or f"NID_Block_{ip}" - rc, _out, _err = safe_run( - [ - "netsh", "advfirewall", "firewall", "add", "rule", - f"name={rule}", "dir=out", "action=block", - f"remoteip={ip}", "enable=yes", - ], - timeout=15, - ) - return rc == 0 - - -# ───────────────────────────────────────────────────────────────────────────── -# Connection trust manager -# ───────────────────────────────────────────────────────────────────────────── - -class ConnectionTrustManager: - def __init__(self, state_path: str): - self.state_path = state_path.replace("nid_state.json", "connection_trust.json") - self.trusted_ips: Dict[str, Dict] = {} 
- self.trusted_domains: Dict[str, Dict] = {} - self._load() - - def _load(self): - try: - if os.path.exists(self.state_path): - with open(self.state_path, "r", encoding="utf-8") as f: - data = json.load(f) - self.trusted_ips = data.get("trusted_ips", {}) - self.trusted_domains = data.get("trusted_domains", {}) - except Exception: - pass - - def save(self): - try: - with open(self.state_path, "w", encoding="utf-8") as f: - json.dump( - {"trusted_ips": self.trusted_ips, "trusted_domains": self.trusted_domains}, - f, indent=2, ensure_ascii=False, - ) - except Exception: - pass - - def trust_ip(self, ip: str, label: str = "", notes: str = ""): - self.trusted_ips[ip] = {"label": label or "Trusted", "first_seen": now_ts(), "notes": notes} - self.save() - - def untrust_ip(self, ip: str): - if ip in self.trusted_ips: - del self.trusted_ips[ip] - self.save() - - def trust_domain(self, domain: str, label: str = ""): - self.trusted_domains[domain.lower()] = {"label": label or "Trusted", "first_seen": now_ts()} - self.save() - - -# ───────────────────────────────────────────────────────────────────────────── -# System helpers -# ───────────────────────────────────────────────────────────────────────────── - -def is_admin_windows() -> bool: - if not sys.platform.startswith("win"): - return False - try: - import ctypes - return bool(ctypes.windll.shell32.IsUserAnAdmin()) - except Exception: - return False - - -def parse_ipconfig_subnet() -> Optional[str]: - rc, out, _ = safe_run(["ipconfig"]) - if rc != 0: - return None - ip = mask = None - for line in out.splitlines(): - s = line.strip() - if s.lower().startswith("ipv4 address") or "IPv4 Address" in s: - m = re.search(r"(\d+\.\d+\.\d+\.\d+)", s) - if m: - ip = m.group(1) - if s.lower().startswith("subnet mask") or "Subnet Mask" in s: - m = re.search(r"(\d+\.\d+\.\d+\.\d+)", s) - if m: - mask = m.group(1) - if ip and mask: - break - if not ip or not mask: - return None - - def ip_to_int(x: str) -> int: - p = [int(v) for v in 
x.split(".")] - return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3] - - def int_to_ip(v: int) -> str: - return ".".join(str((v >> s) & 0xFF) for s in (24, 16, 8, 0)) - - net_i = ip_to_int(ip) & ip_to_int(mask) - prefix = bin(ip_to_int(mask)).count("1") - return f"{int_to_ip(net_i)}/{prefix}" - - -def get_default_gateway() -> Optional[str]: - rc, out, _ = safe_run(["route", "print"]) - if rc != 0: - return None - for line in out.splitlines(): - line = line.strip() - if not line: - continue - if line.startswith("0.0.0.0"): - parts = re.split(r"\s+", line) - if len(parts) >= 3 and parts[0] == "0.0.0.0" and parts[1] == "0.0.0.0": - gw = parts[2] - if re.match(r"^\d+\.\d+\.\d+\.\d+$", gw): - return gw - return None - - -def arp_table() -> Dict[str, str]: - rc, out, _ = safe_run(["arp", "-a"]) - if rc != 0: - return {} - res: Dict[str, str] = {} - for line in out.splitlines(): - m = re.search(r"(\d+\.\d+\.\d+\.\d+)\s+([0-9a-fA-F\-]{17})", line) - if m: - res[m.group(1)] = m.group(2).lower().replace("-", ":") - return res - - -def ping_sweep(cidr: str, limit: int = 254, timeout_ms: int = 250) -> None: - if not cidr.endswith("/24"): - return - prefix = ".".join(cidr.split("/")[0].split(".")[:3]) - ips = [f"{prefix}.{i}" for i in range(1, 255)] - random.shuffle(ips) - for ip in ips[:min(limit, len(ips))]: - safe_run(["ping", "-n", "1", "-w", str(timeout_ms), ip], timeout=2) - - -def scapy_arp_scan(cidr: str, timeout_s: int = 2) -> Dict[str, str]: - if not try_load_scapy(): - return {} - try: - pkt = Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=cidr) - ans, _ = srp(pkt, timeout=timeout_s, verbose=False) - return {str(rcv.psrc): str(rcv.hwsrc).lower() for _, rcv in ans if rcv.psrc and rcv.hwsrc} - except Exception: - return {} - - -def local_ipv4() -> Optional[str]: - try: - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(("8.8.8.8", 80)) - return s.getsockname()[0] - except Exception: - return None - - -# 
───────────────────────────────────────────────────────────────────────────── -# MAC / device intelligence -# ───────────────────────────────────────────────────────────────────────────── - -MAC_MANUFACTURERS = { - "00:1a:2b": "Apple Inc.", "28:cf:e9": "Apple Inc.", "40:a6:d9": "Apple Inc.", - "00:16:32": "Samsung Electronics", "b4:99:ba": "Samsung Electronics", - "fc:a1:3e": "Amazon Technologies Inc.", "74:c2:46": "Amazon Technologies Inc.", - "18:b4:30": "Google Inc.", "44:65:0d": "Google Inc.", - "00:0d:3a": "Raspberry Pi Foundation", "b8:27:eb": "Raspberry Pi Foundation", - "dc:a6:32": "Raspberry Pi Trading Ltd.", - "00:1b:44": "Roku Inc.", "c8:d7:4d": "Roku Inc.", - "e8:9f:6d": "Xiaomi Communications", "34:ce:00": "Xiaomi Communications", - "28:6e:d4": "Huawei Technologies", "00:e0:4c": "Huawei Technologies", - "ac:5a:be": "LG Electronics", "00:07:f1": "LG Electronics", - "00:12:fb": "Sony Corporation", "00:02:b3": "Sony Corporation", - "00:1e:58": "Dell Inc.", "00:14:22": "Dell Inc.", - "00:1f:29": "HP Inc.", "00:15:5d": "Microsoft Corporation", - "00:17:c4": "Netgear Inc.", "00:04:ea": "Netgear Inc.", - "00:90:f5": "TP-Link Technologies", "c8:3a:35": "TP-Link Technologies", - "f0:b4:29": "ASUSTek Computer Inc.", "00:17:9a": "ASUSTek Computer Inc.", - "00:0c:f6": "Cisco Systems", "00:1b:1f": "Cisco Systems", - "00:1d:09": "Intel Corporation", "00:aa:00": "Intel Corporation", -} - -DEVICE_PATTERNS = { - "smartphone": ["apple", "samsung", "xiaomi", "huawei", "oneplus"], - "tablet": ["apple", "samsung", "microsoft"], - "smart_tv": ["samsung", "lg", "sony", "tcl", "vizio"], - "smart_speaker": ["amazon", "google"], - "iot_device": ["raspberry", "esp", "arduino"], - "router": ["cisco", "netgear", "tp-link", "asus", "linksys"], - "computer": ["dell", "hp", "intel", "microsoft"], -} - - -def get_mac_manufacturer(mac: str) -> str: - if not mac: - return "Unknown" - mac = mac.lower().replace("-", ":") - prefix = mac[:8] - if prefix in MAC_MANUFACTURERS: - return 
MAC_MANUFACTURERS[prefix] - try: - resp = requests.get(f"https://api.macvendors.com/{prefix}", timeout=3) - if resp.status_code == 200: - mfr = resp.text.strip() - MAC_MANUFACTURERS[prefix] = mfr - return mfr - except Exception: - pass - return "Unknown" - - -def detect_device_type(manufacturer: str, mac: str) -> str: - mfr_lower = manufacturer.lower() - for dtype, keywords in DEVICE_PATTERNS.items(): - if any(k in mfr_lower for k in keywords): - return dtype.replace("_", " ").title() - return "Unknown Device" - - -def analyze_device_behavior(mac: str, ip: str, known_devices: Dict) -> Dict: - analysis = {"risk_score": 0, "behaviors": [], "recommendations": []} - if mac not in known_devices: - analysis["risk_score"] += 20 - analysis["behaviors"].append("New device on network") - analysis["recommendations"].append("Verify if this is your device") - device_info = known_devices.get(mac, {}) - if "ip_history" in device_info and len(set(device_info["ip_history"])) > 1: - analysis["risk_score"] += 40 - analysis["behaviors"].append("Multiple IP addresses detected") - analysis["recommendations"].append("Possible ARP spoofing - investigate") - manufacturer = get_mac_manufacturer(mac) - if manufacturer == "Unknown": - analysis["risk_score"] += 15 - analysis["behaviors"].append("Unknown manufacturer") - analysis["recommendations"].append("Investigate unknown device") - score = analysis["risk_score"] - analysis["risk_level"] = "HIGH" if score >= 50 else ("MEDIUM" if score >= 25 else "LOW") - return analysis - - -def get_device_intelligence(mac: str, ip: str, known_devices: Dict) -> Dict: - manufacturer = get_mac_manufacturer(mac) - device_type = detect_device_type(manufacturer, mac) - behavior = analyze_device_behavior(mac, ip, known_devices) - return { - "mac": mac, "ip": ip, "manufacturer": manufacturer, "device_type": device_type, - "risk_score": behavior["risk_score"], "risk_level": behavior["risk_level"], - "behaviors": behavior["behaviors"], "recommendations": 
behavior["recommendations"], - "first_seen": known_devices.get(mac, {}).get("first_seen", now_ts()), - "last_seen": known_devices.get(mac, {}).get("last_seen", now_ts()), - } - - -# ───────────────────────────────────────────────────────────────────────────── -# Noise filters / alert helpers -# ───────────────────────────────────────────────────────────────────────────── - -NOISY_MAC_PREFIXES = ("01:00:5e", "33:33") -BROADCAST_MAC = "ff:ff:ff:ff:ff:ff" - - -def is_noisy_mac(mac: str) -> bool: - m = (mac or "").lower() - if not m: - return False - if m == BROADCAST_MAC: - return True - return any(m.startswith(p) for p in NOISY_MAC_PREFIXES) - - -def compact_alert_key(category: str, title: str, details: Dict) -> str: - d = details or {} - parts = [f"cat={category}", f"title={title}"] - for k in ("mac", "ip", "src_ip", "gateway_ip", "old_mac", "new_mac"): - if k in d and d[k]: - parts.append(f"{k}={d[k]}") - return "|".join(parts) - - -def make_alert(severity: str, category: str, title: str, details: Optional[Dict] = None) -> Dict: - return { - "timestamp": now_ts(), - "severity": severity, # INFO|WARN|HIGH - "category": category, # DEVICE|MITM|SCAN|DNS|OUTBOUND|SYSTEM - "title": title, - "details": details or {}, - } - - -# ───────────────────────────────────────────────────────────────────────────── -# Core network monitor -# ───────────────────────────────────────────────────────────────────────────── - -class NetworkMonitor: - def __init__(self, state_path: str): - self.state_path = state_path - - self.trusted: Dict[str, Dict] = {} - self.known_devices: Dict[str, Dict] = {} - self.baseline_gateway_ip: str = "" - self.baseline_gateway_mac: str = "" - self.last_arp: Dict[str, str] = {} - self.last_devices: Dict[str, str] = {} - - self.alerts: List[Dict] = [] - self._alert_index: Dict[str, int] = {} - self._alert_last_seen: Dict[str, float] = {} - self._rate_limit: Dict[str, float] = {} - self._alert_lock = threading.Lock() - - self.sniff_thread: 
class NetworkMonitor:
    """Core engine: device discovery, MITM/scan/DNS/process detection, alert store."""

    def __init__(self, state_path: str):
        self.state_path = state_path

        # Persisted device/trust state.
        self.trusted: Dict[str, Dict] = {}
        self.known_devices: Dict[str, Dict] = {}
        self.baseline_gateway_ip: str = ""
        self.baseline_gateway_mac: str = ""
        self.last_arp: Dict[str, str] = {}
        self.last_devices: Dict[str, str] = {}

        # Alert store + dedup/rate-limit bookkeeping.
        self.alerts: List[Dict] = []
        self._alert_index: Dict[str, int] = {}
        self._alert_last_seen: Dict[str, float] = {}
        self._rate_limit: Dict[str, float] = {}
        self._alert_lock = threading.Lock()

        # Passive sniffer thread control.
        self.sniff_thread: Optional[threading.Thread] = None
        self.sniff_stop = threading.Event()

        # Per-source SYN fan-out tracker for the port-scan heuristic.
        self.syn_tracker: Dict[str, Dict] = {}

        self.dns_queries: Dict[str, List] = {}
        self.suspicious_processes: Set[str] = set()
        self.monitored_files: Dict[str, str] = {}
        self.file_observer = None
        self.process_baseline: Dict[str, Dict] = {}

        self.conn_trust = ConnectionTrustManager(state_path)

        # Advanced scan rate limiter.
        self._adv_scan_counter: int = 0
        self._adv_last_ts: float = 0.0

        self._load_state()

    # ── Persistence ───────────────────────────────────────────────────────────

    def _load_state(self):
        """Restore persisted state; a missing or corrupt file is silently ignored."""
        if not os.path.exists(self.state_path):
            return
        try:
            with open(self.state_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            self.trusted = data.get("trusted", {}) or {}
            self.known_devices = data.get("known_devices", {}) or {}
            self.baseline_gateway_ip = data.get("baseline_gateway_ip", "") or ""
            self.baseline_gateway_mac = data.get("baseline_gateway_mac", "") or ""
        except Exception:
            pass

    def save_state(self):
        """Persist trust/device/gateway state as pretty-printed JSON (best effort)."""
        payload = {
            "trusted": self.trusted,
            "known_devices": self.known_devices,
            "baseline_gateway_ip": self.baseline_gateway_ip,
            "baseline_gateway_mac": self.baseline_gateway_mac,
            "saved_at": now_ts(),
        }
        try:
            with open(self.state_path, "w", encoding="utf-8") as f:
                json.dump(payload, f, indent=2, ensure_ascii=False)
        except Exception:
            pass
< nxt: - return - if cooldown > 0: - self._rate_limit[key] = now + cooldown - - with self._alert_lock: - last = self._alert_last_seen.get(key) - if last is not None and (now - last) <= 45: - idx = self._alert_index.get(key) - if idx is not None and 0 <= idx < len(self.alerts): - a = self.alerts[idx] - d = a.get("details", {}) - d["_count"] = int(d.get("_count", 1)) + 1 - d["_last_seen_ts"] = now_ts() - d["_last_details"] = details - a["details"] = d - self._alert_last_seen[key] = now - return - - a = make_alert(severity, category, title, details) - a["details"]["_count"] = 1 - a["details"]["_first_seen_ts"] = a["timestamp"] - self.alerts.append(a) - self._alert_index[key] = len(self.alerts) - 1 - self._alert_last_seen[key] = now - - if len(self.alerts) > 1500: - self.alerts = self.alerts[-1200:] - self._alert_index.clear() - self._alert_last_seen.clear() - for i, aa in enumerate(self.alerts): - k = compact_alert_key(aa["category"], aa["title"], aa.get("details", {})) - self._alert_index[k] = i - self._alert_last_seen[k] = now - - # ── Trust management ────────────────────────────────────────────────────── - - def trust_mac(self, mac: str, label: str): - mac = (mac or "").lower() - if not mac: - return - if mac not in self.trusted: - self.trusted[mac] = {"label": label.strip() or "Trusted", "first_seen": now_ts()} - else: - self.trusted[mac]["label"] = label.strip() or self.trusted[mac].get("label", "Trusted") - self.save_state() - - def untrust_mac(self, mac: str): - mac = (mac or "").lower() - if mac in self.trusted: - del self.trusted[mac] - self.save_state() - - def set_gateway_baseline(self, gw_ip: str, gw_mac: str): - self.baseline_gateway_ip = gw_ip - self.baseline_gateway_mac = gw_mac - self.save_state() - self.log("INFO", "SYSTEM", "Gateway baseline updated", - {"gateway_ip": gw_ip, "gateway_mac": gw_mac}) - - # ── Device discovery / analysis ─────────────────────────────────────────── - - def discover_devices(self, cidr: Optional[str], active: bool) -> 
Dict[str, str]: - found_ip_mac: Dict[str, str] = {} - if active and cidr: - if try_load_scapy(): - found_ip_mac.update(scapy_arp_scan(cidr, timeout_s=2)) - else: - ping_sweep(cidr) - found_ip_mac.update(arp_table()) - found_ip_mac.update(arp_table()) - mac_ip: Dict[str, str] = {} - for ip, mac in found_ip_mac.items(): - if not mac: - continue - m = mac.lower() - if is_noisy_mac(m): - continue - mac_ip[m] = ip - return mac_ip - - def analyze_devices(self, mac_ip: Dict[str, str], gw_ip: Optional[str]): - if gw_ip: - arps = arp_table() - gw_mac = arps.get(gw_ip, "") - if gw_mac: - if not self.baseline_gateway_ip: - self.set_gateway_baseline(gw_ip, gw_mac) - elif self.baseline_gateway_ip == gw_ip and self.baseline_gateway_mac and gw_mac != self.baseline_gateway_mac: - self.log("HIGH", "MITM", - "Gateway MAC changed (possible ARP spoof / MITM)", - {"gateway_ip": gw_ip, "old_mac": self.baseline_gateway_mac, "new_mac": gw_mac}) - - for mac, ip in mac_ip.items(): - prev = self.known_devices.get(mac) - if not prev: - sev = "INFO" if mac in self.trusted else "WARN" - self.log(sev, "DEVICE", - "Trusted device discovered" if mac in self.trusted else "New device discovered", - {"mac": mac, "ip": ip, "trusted": mac in self.trusted, - "label": self.trusted.get(mac, {}).get("label", "")}) - self.known_devices[mac] = { - "last_ip": ip, "last_seen": now_ts(), - "trusted": mac in self.trusted, - "label": self.trusted.get(mac, {}).get("label", ""), - } - - ip_to_mac = {ip: mac for mac, ip in mac_ip.items()} - mac_to_ips: Dict[str, List[str]] = {} - for mac, ip in mac_ip.items(): - mac_to_ips.setdefault(mac, []).append(ip) - - for mac, ips in mac_to_ips.items(): - if len(set(ips)) >= 2 and mac not in self.trusted: - self.log("WARN", "MITM", "One MAC claims multiple IPs (possible spoofing)", - {"mac": mac, "ips": sorted(set(ips))}) - - if self.last_arp: - for ip, mac in ip_to_mac.items(): - old = self.last_arp.get(ip) - if old and old != mac: - self.log("HIGH", "MITM", "IP-to-MAC 
mapping changed (ARP spoof indicator)", - {"ip": ip, "old_mac": old, "new_mac": mac}) - - self.last_devices = dict(mac_ip) - self.last_arp = {ip: mac for mac, ip in mac_ip.items()} - self.save_state() - - # ── Outbound connections snapshot ───────────────────────────────────────── - - def snapshot_outbound(self) -> List[Dict]: - conns_out: List[Dict] = [] - try: - conns = psutil.net_connections(kind="inet") - except Exception: - return conns_out - - for c in conns: - try: - if not c.raddr: - continue - pid = c.pid or 0 - pname = "?" - exe_path = "" - try: - if pid: - proc = psutil.Process(pid) - pname = proc.name() - try: - exe_path = proc.exe() - except Exception: - exe_path = "" - except Exception: - pname = "?" - - la = f"{c.laddr.ip}:{c.laddr.port}" if c.laddr else "" - ra = f"{c.raddr.ip}:{c.raddr.port}" if c.raddr else "" - remote_ip = str(c.raddr.ip) - remote_port = int(c.raddr.port) - - geo = get_ip_geolocation(remote_ip) - trust_level = classify_connection( - remote_ip, remote_port, - geo.get("org", ""), - geo.get("country", "Unknown"), - self.conn_trust.trusted_ips, - ) - - svc_name, svc_desc = port_service(remote_port) - # Also check local port for well-known services (e.g. 
SMB server on 445) - local_port = int(c.laddr.port) if c.laddr else 0 - if svc_name in ("Ephemeral", "Registered") and local_port in PORT_SERVICES: - svc_name, svc_desc = port_service(local_port) - - # Reputation check (hybrid: only for unknown/suspicious/dangerous) - rep_checker = get_reputation_checker() - rep_indicator = REP_UNCHECKED - cached_rep = rep_checker.get_cached(remote_ip) - if cached_rep: - rep_indicator = cached_rep.get("rep", REP_UNCHECKED) - trust_level = rep_checker.reputation_upgrades_trust(remote_ip, trust_level) - - conns_out.append({ - "pid": pid, - "process": pname, - "exe": exe_path, - "laddr": la, - "raddr": ra, - "status": str(c.status), - "remote_ip": remote_ip, - "remote_port": remote_port, - "country": geo.get("country", "Unknown"), - "region": geo.get("region", ""), - "city": geo.get("city", ""), - "org": geo.get("org", ""), - "trust": trust_level, - "rep": rep_indicator, - "service": svc_name, - "service_desc": svc_desc, - }) - except Exception: - continue - - trust_order = {"safe": 0, "known": 1, "unknown": 2, "suspicious": 3, "dangerous": 4} - conns_out.sort( - key=lambda x: (-(trust_order.get(x["trust"], 2)), x.get("process", "")), - ) - return conns_out - - # ── Background reputation checks (hybrid auto-check) ───────────────────── - - def check_reputations_background(self, conns: List[Dict]): - """ - Auto-check IPs classified as unknown/suspicious/dangerous. - Runs in a background thread to avoid blocking the UI. - Max 3 checks per cycle to stay within rate limits. 
- """ - rep = get_reputation_checker() - if not rep.has_keys: - return - - to_check = [] - for c in conns: - ip = c.get("remote_ip", "") - trust = c.get("trust", "") - if trust in ("unknown", "suspicious", "dangerous") and ip and not is_private_ip(ip): - if rep.get_cached(ip) is None: - to_check.append(ip) - if len(to_check) >= 3: - break - - if not to_check: - return - - def _bg(): - for ip in to_check: - result = rep.check_ip(ip) - if result.get("rep") in (REP_DANGEROUS, REP_SUSPICIOUS): - self.log("WARN", "REPUTATION", - f"IP {ip} flagged by reputation services", - {"ip": ip, "rep": result.get("rep", ""), - "vt_malicious": result.get("vt_malicious", 0), - "abuse_score": result.get("abuse_score", 0), - "source": result.get("source", "")}) - - threading.Thread(target=_bg, daemon=True).start() - - # ── Advanced threat detection ───────────────────────────────────────────── - - def _detect_advanced_threats(self, conns: List[Dict]): - """ - Gate-kept: runs at most every 30 seconds or every 5th scan. - Calls sub-detectors for Flipper Zero, Tor, audio spying, etc. 
- """ - self._adv_scan_counter += 1 - now = time.time() - if self._adv_scan_counter % 5 != 0 and (now - self._adv_last_ts) < 30: - return - self._adv_last_ts = now - - self._detect_flipper_zero() - self._detect_tor_activity(conns) - self._detect_audio_spying() - - def _detect_flipper_zero(self): - """Detect Flipper Zero and similar BadUSB devices via WMI""" - try: - _rc, out2, _err = safe_run( - [ - "wmic", "path", "Win32_PnPEntity", "where", - "Caption like '%Flipper%' or Caption like '%BadUSB%' or " - "(DeviceID like '%VID_0483%' and DeviceID like '%PID_5741%')", - "get", "Caption,DeviceID", - ], - timeout=10, - ) - if out2.strip() and len(out2.strip().splitlines()) > 1: - self.log( - "HIGH", "DEVICE", - "Flipper Zero or suspicious USB HID device detected", - {"details": out2.strip()[:300], "action": "Physical inspection recommended"}, - ) - except Exception: - pass - - def _detect_tor_activity(self, conns: List[Dict]): - """Detect Tor usage and dark web tool activity""" - tor_ports = {9050, 9150, 9001, 9030, 9040} - - for c in conns: - port = c.get("remote_port", 0) - raddr = c.get("raddr", "") - proc = c.get("process", "").lower() - - if port in tor_ports: - self.log("HIGH", "OUTBOUND", - "Connection to known Tor port detected", - {"process": c.get("process"), "raddr": raddr, "port": port}) - - if proc in TOR_PROCESSES: - self.log("WARN", "SYSTEM", - "Tor relay process detected running", - {"process": proc, "connection": raddr}) - - # Check for running Tor processes even without active connections - try: - for proc in psutil.process_iter(["name", "exe"]): - try: - name = proc.info["name"].lower() - if name in TOR_PROCESSES: - self.log("WARN", "SYSTEM", - "Tor process running on system", - {"process": name}) - except Exception: - pass - except Exception: - pass - - def _detect_audio_spying(self): - """Detect processes that may be recording audio/microphone""" - try: - _rc, out, _err = safe_run( - [ - "wmic", "path", "Win32_Process", - "where", - "Name like 
'%record%' or Name like '%capture%' or Name like '%listen%'", - "get", "Name,ProcessId,ExecutablePath", - ], - timeout=8, - ) - if out.strip() and len(out.strip().splitlines()) > 1: - self.log("WARN", "SYSTEM", - "Possible audio recording process detected via WMI", - {"processes": out.strip()[:300]}) - - for proc in psutil.process_iter(["name", "exe", "pid"]): - try: - name = proc.info["name"].lower() - if name in SUSPICIOUS_AUDIO_PROCESSES: - self.log("HIGH", "SYSTEM", - "Suspicious audio-capable process detected", - {"process": name, "pid": proc.info["pid"], - "exe": proc.info.get("exe", "")}) - except Exception: - pass - except Exception: - pass - - # ── DNS monitoring ──────────────────────────────────────────────────────── - - def monitor_dns_queries(self): - try: - rc, out, _ = safe_run(["ipconfig", "/displaydns"]) - if rc != 0: - return - current_time = time.time() - suspicious_domains = ["*.tk", "*.ml", "*.ga", "bit.ly", "tinyurl.com", "t.co", "*onion"] - for line in out.splitlines(): - if "Record Name" in line: - match = re.search(r"Record Name:\s*(\S+)", line) - if match: - domain = match.group(1).lower() - self.dns_queries.setdefault(domain, []).append(current_time) - if len(self.dns_queries[domain]) > 50: - window = 300 - recent = [t for t in self.dns_queries[domain] if current_time - t <= window] - if len(recent) > 20: - self.log("WARN", "DNS", - "High frequency DNS queries (possible tunneling/exfiltration)", - {"domain": domain, "query_count": len(recent), "time_window": f"{window}s"}) - for pattern in suspicious_domains: - if fnmatch.fnmatch(domain, pattern): - self.log("HIGH", "DNS", - "Query to suspicious domain detected", - {"domain": domain, "pattern": pattern}) - break - except Exception as e: - self.log("WARN", "SYSTEM", "DNS monitoring error", {"error": str(e)}) - - # ── Process monitoring ──────────────────────────────────────────────────── - - def monitor_suspicious_processes(self): - try: - current_processes = {} - suspicious_indicators = [ 
- ("keylogger", "log", "record"), - ("sniffer", "capture", "wireshark", "tshark"), - ("remote", "teamviewer", "anydesk", "vnc"), - ("crypt", "encrypt", "ransom"), - ("miner", "bitcoin", "ethereum"), - ("hack", "exploit", "payload"), - ("proxy", "tunnel", "vpn"), - ] - for proc in psutil.process_iter(["pid", "name", "exe", "cmdline"]): - try: - name = proc.info["name"].lower() - exe = proc.info.get("exe", "").lower() - cmdline = " ".join(proc.info.get("cmdline", [])).lower() - current_processes[name] = {"pid": proc.info["pid"], "exe": exe, - "cmdline": cmdline, "first_seen": now_ts()} - for grp in suspicious_indicators: - if any(ind in name or ind in exe or ind in cmdline for ind in grp): - if name not in self.suspicious_processes: - self.suspicious_processes.add(name) - self.log("HIGH", "SYSTEM", "Suspicious process detected", - {"process": name, "exe": exe, "indicators": grp}) - break - except (psutil.NoSuchProcess, psutil.AccessDenied): - continue - - for name, info in current_processes.items(): - if name not in self.suspicious_processes: - if name not in self.process_baseline: - self.process_baseline[name] = {"count": 1, "first_seen": info["first_seen"]} - else: - self.process_baseline[name]["count"] += 1 - - current_time = time.time() - self.process_baseline = { - k: v for k, v in self.process_baseline.items() - if current_time - time.mktime( - time.strptime(v["first_seen"], "%Y-%m-%d %H:%M:%S") - ) <= 3600 - } - except Exception as e: - self.log("WARN", "SYSTEM", "Process monitoring error", {"error": str(e)}) - - # ── File monitoring ─────────────────────────────────────────────────────── - - def start_file_monitoring(self, paths: List[str]): - if not HAS_WATCHDOG: - self.log("WARN", "SYSTEM", "File monitoring disabled (watchdog missing)", - {"action": "pip install watchdog"}) - return - try: - monitor_ref = self - - class SecurityFileHandler(FileSystemEventHandler): - def on_modified(self, event): - if event.is_directory: - return - filepath = event.src_path 
- try: - with open(filepath, "rb") as f: - fhash = hashlib.sha256(f.read()).hexdigest() - if filepath in monitor_ref.monitored_files: - if monitor_ref.monitored_files[filepath] != fhash: - monitor_ref.log("HIGH", "SYSTEM", "Critical file modified", - {"file": filepath, - "old_hash": monitor_ref.monitored_files[filepath][:16], - "new_hash": fhash[:16]}) - monitor_ref.monitored_files[filepath] = fhash - except Exception: - pass - - critical_paths = [ - os.path.expandvars(r"%SystemRoot%\System32\drivers\etc\hosts"), - os.path.expandvars(r"%SystemRoot%\System32\config"), - ] + paths - - self.file_observer = Observer() - handler = SecurityFileHandler() - for path in critical_paths: - if os.path.exists(path): - self.file_observer.schedule(handler, path, recursive=True) - try: - with open(path, "rb") as f: - self.monitored_files[path] = hashlib.sha256(f.read()).hexdigest() - except Exception: - pass - self.file_observer.start() - self.log("INFO", "SYSTEM", "File monitoring started", {"paths": len(critical_paths)}) - except Exception as e: - self.log("WARN", "SYSTEM", "File monitoring setup failed", {"error": str(e)}) - - def stop_file_monitoring(self): - if self.file_observer: - self.file_observer.stop() - self.file_observer.join() - - # ── Passive sniff ───────────────────────────────────────────────────────── - - def start_passive_sniff(self): - if not try_load_scapy(): - self.log("WARN", "SYSTEM", "Passive sniff disabled (scapy missing)", - {"action": "pip install scapy"}) - return - if self.sniff_thread and self.sniff_thread.is_alive(): - return - self.sniff_stop.clear() - self.sniff_thread = threading.Thread(target=self._sniff_loop, daemon=True) - self.sniff_thread.start() - self.log("INFO", "SYSTEM", "Passive sniff started", - {"requires": "Npcap + Admin for best results"}) - - def stop_passive_sniff(self): - self.sniff_stop.set() - self.log("INFO", "SYSTEM", "Passive sniff stopping", {}) - - def _sniff_loop(self): - def handler(pkt): - if self.sniff_stop.is_set(): 
- return - try: - if ARP is not None and pkt.haslayer(ARP): - arp = pkt.getlayer(ARP) - if getattr(arp, "op", None) == 2: - psrc = str(getattr(arp, "psrc", "")) - hwsrc = str(getattr(arp, "hwsrc", "")).lower() - if ( - self.baseline_gateway_ip and psrc == self.baseline_gateway_ip - and self.baseline_gateway_mac and hwsrc - and hwsrc != self.baseline_gateway_mac - ): - self.log("HIGH", "MITM", - "Observed ARP reply claiming to be gateway with different MAC", - {"gateway_ip": psrc, "claimed_mac": hwsrc, - "baseline_mac": self.baseline_gateway_mac}) - except Exception: - pass - try: - if pkt.haslayer("IP") and pkt.haslayer("TCP"): - ip_layer = pkt["IP"] - tcp = pkt["TCP"] - flags = int(tcp.flags) - if (flags & 0x02) and not (flags & 0x10): - src = str(ip_layer.src) - dport = int(tcp.dport) - rec = self.syn_tracker.get(src) - t = time.time() - if not rec: - self.syn_tracker[src] = {"ports": {dport}, "ts": t} - else: - if t - rec["ts"] > 20: - rec["ports"] = {dport} - rec["ts"] = t - else: - rec["ports"].add(dport) - if len(rec["ports"]) >= 18: - self.log("WARN", "SCAN", - "Possible port scan (many SYNs to multiple ports)", - {"src_ip": src, "ports_count": len(rec["ports"]), - "sample_ports": sorted(list(rec["ports"]))[:25]}) - rec["ports"].clear() - rec["ts"] = t - except Exception: - pass - - try: - sniff( - filter="arp or tcp", - prn=handler, - store=False, - stop_filter=lambda _p: self.sniff_stop.is_set(), - ) - except Exception as e: - self.log("WARN", "SYSTEM", - "Passive sniff failed (Npcap/Admin likely missing)", {"error": str(e)}) - - # ── Threat level calculation ─────────────────────────────────────────────── - - def compute_threat_level(self) -> str: - """ - Returns 'LOW' | 'MEDIUM' | 'HIGH' | 'CRITICAL' based on recent alerts. 
- """ - cutoff = time.time() - 3600 - high_count = 0 - warn_count = 0 - for a in self.alerts: - try: - ts = time.mktime(time.strptime(a["timestamp"], "%Y-%m-%d %H:%M:%S")) - except Exception: - continue - if ts < cutoff: - continue - sev = a.get("severity", "") - if sev == "HIGH": - high_count += 1 - elif sev == "WARN": - warn_count += 1 - if high_count >= 3: - return "CRITICAL" - if high_count >= 1: - return "HIGH" - if warn_count >= 5: - return "MEDIUM" - return "LOW" - - def get_active_threats(self) -> List[Dict]: - """Return HIGH-severity alerts from the last hour.""" - cutoff = time.time() - 3600 - threats = [] - for a in self.alerts: - try: - ts = time.mktime(time.strptime(a["timestamp"], "%Y-%m-%d %H:%M:%S")) - except Exception: - continue - if ts >= cutoff and a.get("severity") in ("HIGH", "WARN"): - threats.append(a) - return list(reversed(threats[-100:])) - - -# ───────────────────────────────────────────────────────────────────────────── -# GUI helpers -# ───────────────────────────────────────────────────────────────────────────── - -def simple_prompt(parent, title: str, label: str, default: str = "") -> Optional[str]: - win = ctk.CTkToplevel(parent) - win.title(title) - win.resizable(False, False) - win.grab_set() - - ctk.CTkLabel(win, text=label).pack(padx=12, pady=(12, 6)) - var = tk.StringVar(value=default) - ent = ctk.CTkEntry(win, textvariable=var, width=44) - ent.pack(padx=12, pady=(0, 12)) - ent.focus_set() - - out: Dict[str, Optional[str]] = {"v": None} - btns = ctk.CTkFrame(win) - btns.pack(pady=(0, 12)) - - def ok(): - out["v"] = var.get().strip() - win.destroy() - - def cancel(): - out["v"] = None - win.destroy() - - ctk.CTkButton(btns, text="OK", command=ok).pack(side="left", padx=6) - ctk.CTkButton(btns, text="Cancel", command=cancel).pack(side="left", padx=6) - win.bind("", lambda _e: ok()) - win.bind("", lambda _e: cancel()) - parent.wait_window(win) - return out["v"] - - -# Trust/classification display colours -TRUST_COLORS = { - 
# Trust/classification display colours
TRUST_COLORS = {
    "safe": "#90EE90",        # light green
    "known": "#87CEEB",       # light blue
    "unknown": "#FFD700",     # gold / yellow
    "suspicious": "#FFA500",  # orange
    "dangerous": "#FF4444",   # red
}

THREAT_LEVEL_COLORS = {
    "LOW": "#90EE90",
    "MEDIUM": "#FFD700",
    "HIGH": "#FFA500",
    "CRITICAL": "#FF4444",
}


# ─────────────────────────────────────────────────────────────────────────────
# Connection detail popup
# ─────────────────────────────────────────────────────────────────────────────

class ConnectionDetailPopup:
    """Modal window showing everything known about one outbound connection."""

    def __init__(self, parent, conn: Dict, monitor: "NetworkMonitor", on_refresh):
        self.parent = parent
        self.conn = conn
        self.mon = monitor
        self.on_refresh = on_refresh  # callback that refreshes the parent table

        self.win = ctk.CTkToplevel(parent)
        self.win.title("Connection Detail")
        self.win.geometry("580x720")
        self.win.resizable(True, True)
        self.win.minsize(520, 500)
        self.win.grab_set()  # modal

        self._build()

    def _build(self):
        """Assemble the popup UI: header, info rows, reputation area, actions."""
        conn = self.conn
        trust = conn.get("trust", "unknown")
        trust_color = TRUST_COLORS.get(trust, "white")

        # Everything lives in one scrollable column.
        scroll = ctk.CTkScrollableFrame(self.win, corner_radius=0)
        scroll.pack(fill="both", expand=True, padx=0, pady=0)

        header = ctk.CTkFrame(scroll, fg_color=("#1e1e2e", "#1e1e2e"), corner_radius=8)
        header.pack(fill="x", padx=12, pady=(12, 4))
        ctk.CTkLabel(header, text=f" {conn.get('process', '?')}",
                     font=ctk.CTkFont(size=15, weight="bold")).pack(side="left", pady=8, padx=4)
        ctk.CTkLabel(header, text=f"PID {conn.get('pid', '?')}",
                     text_color="gray").pack(side="left", pady=8, padx=4)
        ctk.CTkLabel(header, text=trust.upper(), text_color=trust_color,
                     font=ctk.CTkFont(weight="bold")).pack(side="right", pady=8, padx=12)

        info = ctk.CTkFrame(scroll, corner_radius=8)
        info.pack(fill="x", padx=12, pady=4)

        def row(lbl, val):
            r = ctk.CTkFrame(info, fg_color="transparent")
            r.pack(fill="x", padx=8, pady=2)
            ctk.CTkLabel(r, text=lbl, width=120, anchor="w",
                         font=ctk.CTkFont(weight="bold")).pack(side="left")
            ctk.CTkLabel(r, text=str(val), anchor="w", wraplength=340).pack(side="left", padx=4)

        # Service badge — the most important "what is this?" answer.
        svc_name = conn.get("service", "")
        svc_desc = conn.get("service_desc", "")
        if svc_name:
            svc_frame = ctk.CTkFrame(info, fg_color="#1a2a3a", corner_radius=6)
            svc_frame.pack(fill="x", padx=8, pady=(4, 6))
            ctk.CTkLabel(
                svc_frame,
                text=f" 🔌 {svc_name}",
                font=ctk.CTkFont(size=13, weight="bold"),
                text_color="#87CEEB",
                anchor="w",
            ).pack(side="left", padx=8, pady=4)
            if svc_desc:
                ctk.CTkLabel(svc_frame, text=svc_desc, text_color="#aaaaaa",
                             anchor="w", wraplength=340).pack(side="left", padx=4, pady=4)

        row("Remote IP:", conn.get("remote_ip", ""))
        row("Remote port:", conn.get("remote_port", ""))
        row("Local addr:", conn.get("laddr", ""))
        row("Status:", conn.get("status", ""))
        row("Country:", conn.get("country", ""))
        row("Organization:", conn.get("org", ""))
        row("Executable:", conn.get("exe", "(unavailable)") or "(unavailable)")

        trust_explanations = {
            "safe": "Explicitly trusted by you, or private LAN address.",
            "known": "Belongs to a well-known organisation (Google, Microsoft, etc.).",
            "unknown": "No geolocation or org data available.",
            "suspicious": "Unusual port, flagged country, or unrecognised service.",
            "dangerous": "Confirmed bad port (Tor exit, backdoor, etc.).",
        }
        row("Trust reason:", trust_explanations.get(trust, ""))

        # ── Reputation section ────────────────────────────────────────────
        rep_frame = ctk.CTkFrame(scroll, corner_radius=8)
        rep_frame.pack(fill="x", padx=12, pady=4)

        rep_header = ctk.CTkFrame(rep_frame, fg_color="transparent")
        rep_header.pack(fill="x", padx=8, pady=(6, 2))
        ctk.CTkLabel(rep_header, text="🛡️ IP Reputation",
                     font=ctk.CTkFont(size=13, weight="bold")).pack(side="left")

        self._rep_content = ctk.CTkFrame(rep_frame, fg_color="transparent")
        self._rep_content.pack(fill="x", padx=8, pady=(0, 6))

        remote_ip = conn.get("remote_ip", "")
        checker = get_reputation_checker()
        cached = checker.get_cached(remote_ip)
        if cached:
            self._show_rep_results(cached)
        else:
            ctk.CTkLabel(self._rep_content, text="Not checked yet",
                         text_color="gray").pack(anchor="w")

        rep_btn_row = ctk.CTkFrame(rep_frame, fg_color="transparent")
        rep_btn_row.pack(fill="x", padx=8, pady=(0, 8))
        self._check_rep_btn = ctk.CTkButton(
            rep_btn_row, text="Check Reputation",
            command=lambda: self._check_reputation(remote_ip),
            fg_color="#5865F2", hover_color="#4752C4", width=150,
        )
        self._check_rep_btn.pack(side="left", padx=4)
        self._rep_status_lbl = ctk.CTkLabel(rep_btn_row, text="", text_color="gray")
        self._rep_status_lbl.pack(side="left", padx=8)

        if not checker.has_keys:
            self._check_rep_btn.configure(state="disabled")
            self._rep_status_lbl.configure(text="No API keys configured")

        # ── Action buttons ────────────────────────────────────────────────
        exe_path = conn.get("exe", "")
        btn_frame = ctk.CTkFrame(scroll, fg_color="transparent")
        btn_frame.pack(fill="x", padx=12, pady=8)

        if exe_path and os.path.exists(exe_path):
            ctk.CTkButton(
                btn_frame, text="Open Location",
                command=lambda: self._open_location(exe_path),
                fg_color="#3a7ebf", hover_color="#2b6194", width=130,
            ).pack(side="left", padx=4)

        ctk.CTkButton(btn_frame, text="Block IP", command=self._block_ip,
                      fg_color="#bf6d3a", hover_color="#944e2b",
                      width=110).pack(side="left", padx=4)
        ctk.CTkButton(btn_frame, text="Trust IP", command=self._trust_ip,
                      fg_color="#3abf6d", hover_color="#2b944e",
                      width=110).pack(side="left", padx=4)
        ctk.CTkButton(btn_frame, text="Kill Process", command=self._kill_process,
                      fg_color="#bf3a3a", hover_color="#942b2b",
                      width=110).pack(side="left", padx=4)

        ctk.CTkButton(scroll, text="Close", command=self.win.destroy,
                      width=80).pack(pady=(4, 12))

    def _open_location(self, exe_path: str):
        """Open Windows Explorer with the executable pre-selected."""
        try:
            subprocess.Popen(
                ["explorer", "/select,", exe_path],
                creationflags=_CREATE_NO_WINDOW,
            )
        except Exception as e:
            messagebox.showerror("Open Location", str(e), parent=self.win)
exe_path], - creationflags=_CREATE_NO_WINDOW, - ) - except Exception as e: - messagebox.showerror("Open Location", str(e), parent=self.win) - - def _block_ip(self): - ip = self.conn.get("remote_ip", "") - if not ip: - return - if messagebox.askyesno("Block IP", - f"Add Windows Firewall outbound block rule for {ip}?", - parent=self.win): - ok = block_ip_firewall(ip) - if ok: - messagebox.showinfo("Block IP", f"Outbound rule created for {ip}.", parent=self.win) - self.mon.log("HIGH", "SYSTEM", "IP blocked via firewall", - {"ip": ip, "process": self.conn.get("process")}) - else: - messagebox.showerror("Block IP", - "netsh failed. Run as Administrator for firewall access.", - parent=self.win) - - def _trust_ip(self): - ip = self.conn.get("remote_ip", "") - if not ip: - return - self.mon.conn_trust.trust_ip(ip, "Manually trusted") - messagebox.showinfo("Trust IP", f"{ip} added to trust list.", parent=self.win) - self.on_refresh() - self.win.destroy() - - def _kill_process(self): - pid = self.conn.get("pid", 0) - pname = self.conn.get("process", "?") - if not pid: - messagebox.showerror("Kill Process", "No PID available.", parent=self.win) - return - if messagebox.askyesno("Kill Process", - f"Terminate {pname} (PID {pid})?\nThis cannot be undone.", - parent=self.win): - try: - psutil.Process(pid).kill() - messagebox.showinfo("Kill Process", f"{pname} (PID {pid}) terminated.", - parent=self.win) - self.mon.log("HIGH", "SYSTEM", "Process killed by user", - {"process": pname, "pid": pid}) - self.on_refresh() - self.win.destroy() - except Exception as e: - messagebox.showerror("Kill Process", str(e), parent=self.win) - - def _show_rep_results(self, rep: Dict): - """Display reputation results in the popup.""" - for w in self._rep_content.winfo_children(): - w.destroy() - - indicator = rep.get("rep", REP_UNCHECKED) - ind_color = REP_COLORS.get(indicator, "#888888") - - # Indicator badge - badge_frame = ctk.CTkFrame(self._rep_content, fg_color="#1a2a3a", corner_radius=6) - 
badge_frame.pack(fill="x", pady=(2, 4)) - ctk.CTkLabel(badge_frame, text=f" {indicator} ", - font=ctk.CTkFont(size=16, weight="bold"), - text_color=ind_color).pack(side="left", padx=6, pady=4) - label_map = {REP_CLEAN: "Clean", REP_SUSPICIOUS: "Suspicious", REP_DANGEROUS: "Dangerous"} - ctk.CTkLabel(badge_frame, text=label_map.get(indicator, "Unknown"), - text_color=ind_color, - font=ctk.CTkFont(weight="bold")).pack(side="left", pady=4) - ctk.CTkLabel(badge_frame, text=f" (via {rep.get('source', '?')})", - text_color="gray").pack(side="left", pady=4, padx=4) - - def detail_row(lbl, val, val_color="white"): - r = ctk.CTkFrame(self._rep_content, fg_color="transparent") - r.pack(fill="x", pady=1) - ctk.CTkLabel(r, text=lbl, width=140, anchor="w", - text_color="gray").pack(side="left") - ctk.CTkLabel(r, text=str(val), anchor="w", - text_color=val_color).pack(side="left", padx=4) - - # VirusTotal results - if "vt_malicious" in rep: - vt_mal = rep["vt_malicious"] - vt_susp = rep.get("vt_suspicious", 0) - vt_harm = rep.get("vt_harmless", 0) - mal_col = "#FF4444" if vt_mal > 0 else "#90EE90" - detail_row("VT Malicious:", f"{vt_mal} (suspicious: {vt_susp}, harmless: {vt_harm})", mal_col) - detail_row("VT Reputation:", rep.get("vt_reputation", "?")) - - # AbuseIPDB results - if "abuse_score" in rep: - score = rep["abuse_score"] - score_col = "#FF4444" if score >= 50 else "#FFA500" if score >= 15 else "#90EE90" - detail_row("Abuse Score:", f"{score}% ({rep.get('abuse_reports', 0)} reports)", score_col) - if rep.get("abuse_is_tor"): - detail_row("Tor Node:", "YES", "#FF4444") - if rep.get("abuse_usage"): - detail_row("Usage Type:", rep.get("abuse_usage", "")) - if rep.get("abuse_domain"): - detail_row("Domain:", rep.get("abuse_domain", "")) - - if rep.get("checked_at"): - detail_row("Checked at:", rep["checked_at"]) - - def _check_reputation(self, ip: str): - """Manual reputation check (runs in background thread, updates UI).""" - if not ip or is_private_ip(ip): - 
self._rep_status_lbl.configure(text="Private IP — no check needed") - return - - self._check_rep_btn.configure(state="disabled") - self._rep_status_lbl.configure(text="Checking...") - - def _bg(): - rep = get_reputation_checker() - result = rep.check_ip(ip, force=True) - - def _update_ui(): - try: - if not self.win.winfo_exists(): - return - except Exception: - return - self._show_rep_results(result) - self._check_rep_btn.configure(state="normal") - self._rep_status_lbl.configure(text="") - - try: - self.win.after(0, _update_ui) - except Exception: - pass - - threading.Thread(target=_bg, daemon=True).start() - - -# ───────────────────────────────────────────────────────────────────────────── -# Main App -# ───────────────────────────────────────────────────────────────────────────── - -# ───────────────────────────────────────────────────────────────────────────── -# API Keys Settings dialog -# ───────────────────────────────────────────────────────────────────────────── - -class APIKeySettingsDialog: - def __init__(self, parent): - self.win = ctk.CTkToplevel(parent) - self.win.title("API Key Settings") - self.win.geometry("520x280") - self.win.resizable(False, False) - self.win.grab_set() - - rep = get_reputation_checker() - - ctk.CTkLabel(self.win, text="🔑 API Keys for IP Reputation", - font=ctk.CTkFont(size=16, weight="bold")).pack(pady=(16, 4)) - ctk.CTkLabel(self.win, text="Keys are stored locally in tools/nid_api_keys.json", - text_color="gray", font=ctk.CTkFont(size=11)).pack(pady=(0, 12)) - - form = ctk.CTkFrame(self.win) - form.pack(fill="x", padx=20, pady=4) - - # VirusTotal key - r1 = ctk.CTkFrame(form, fg_color="transparent") - r1.pack(fill="x", padx=10, pady=6) - ctk.CTkLabel(r1, text="VirusTotal:", width=100, anchor="w", - font=ctk.CTkFont(weight="bold")).pack(side="left") - self.vt_var = tk.StringVar(value=rep._vt_key) - ctk.CTkEntry(r1, textvariable=self.vt_var, width=340, show="*").pack(side="left", padx=4) - - # AbuseIPDB key - r2 = 
ctk.CTkFrame(form, fg_color="transparent") - r2.pack(fill="x", padx=10, pady=6) - ctk.CTkLabel(r2, text="AbuseIPDB:", width=100, anchor="w", - font=ctk.CTkFont(weight="bold")).pack(side="left") - self.abuse_var = tk.StringVar(value=rep._abuse_key) - ctk.CTkEntry(r2, textvariable=self.abuse_var, width=340, show="*").pack(side="left", padx=4) - - # Status - self.status_lbl = ctk.CTkLabel(self.win, text="", text_color="gray") - self.status_lbl.pack(pady=4) - - # Buttons - btns = ctk.CTkFrame(self.win, fg_color="transparent") - btns.pack(pady=8) - ctk.CTkButton(btns, text="Save", command=self._save, - fg_color="#3abf6d", hover_color="#2b944e", width=100).pack(side="left", padx=6) - ctk.CTkButton(btns, text="Cancel", command=self.win.destroy, width=100).pack(side="left", padx=6) - - # Show current status - has_vt = "✓" if rep._vt_key else "✗" - has_ab = "✓" if rep._abuse_key else "✗" - self.status_lbl.configure(text=f"VT key: {has_vt} | AbuseIPDB key: {has_ab}") - - def _save(self): - rep = get_reputation_checker() - rep.save_keys(self.vt_var.get(), self.abuse_var.get()) - has_vt = "✓" if rep._vt_key else "✗" - has_ab = "✓" if rep._abuse_key else "✗" - self.status_lbl.configure(text=f"Saved! 
VT: {has_vt} | AbuseIPDB: {has_ab}", text_color="#90EE90") - - -class App(ctk.CTkFrame): - def __init__(self, parent): - super().__init__(parent, corner_radius=10) - self.parent = parent - parent.title("Network Intrusion Detector Pro") - parent.geometry("1240x760") - # Single topmost flash to bring window to front - parent.attributes("-topmost", True) - parent.after(100, lambda: parent.attributes("-topmost", False)) - - self.state_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "nid_state.json" - ) - self.mon = NetworkMonitor(self.state_path) - - self.running = True - self.worker_q: "queue.Queue[str]" = queue.Queue() - - self.active_scan = tk.BooleanVar(value=True) - self.passive_scan = tk.BooleanVar(value=True) - self.refresh_ms = tk.IntVar(value=3500) - - self.cidr = tk.StringVar(value=parse_ipconfig_subnet() or "") - self.gateway = tk.StringVar(value=get_default_gateway() or "") - self.localip = tk.StringVar(value=local_ipv4() or "") - - self._last_conns: List[Dict] = [] - self._last_scan_ts: str = "" - - # Connection history — accumulates all unknown/suspicious/dangerous IPs - self._history_path = self.state_path.replace("nid_state.json", "nid_conn_history.json") - self._conn_history: Dict[str, Dict] = {} # keyed by remote_ip - self._load_history() - - self._build_ui() - - threading.Thread(target=self._worker_loop, daemon=True).start() - - self.after(600, self.scan_now) - self.after(900, self._ensure_passive) - self.after(700, self._ui_tick) - - # ── Style helper (called ONCE) ──────────────────────────────────────────── - - def _apply_tree_style(self): - style = ttk.Style(self) - style.theme_use("default") - style.configure( - "Treeview", - background="#2b2b2b", foreground="white", - fieldbackground="#2b2b2b", borderwidth=0, rowheight=25, - ) - style.configure( - "Treeview.Heading", - background="#565b5e", foreground="white", - relief="flat", font=("Arial", 10, "bold"), - ) - style.map("Treeview", background=[("selected", "#1f538d")]) - - # 
── UI build ────────────────────────────────────────────────────────────── - - def _build_ui(self): - self._apply_tree_style() - - top = ctk.CTkFrame(self) - top.pack(fill="x", padx=10, pady=8) - - ctk.CTkLabel(top, text=f"Admin: {'YES' if is_admin_windows() else 'NO'}").pack(side="left") - ttk.Separator(top, orient="vertical").pack(side="left", fill="y", padx=10) - ctk.CTkLabel(top, text="Local IP").pack(side="left") - ctk.CTkEntry(top, textvariable=self.localip, width=120).pack(side="left", padx=6) - ctk.CTkLabel(top, text="Gateway").pack(side="left") - ctk.CTkEntry(top, textvariable=self.gateway, width=120).pack(side="left", padx=6) - ctk.CTkLabel(top, text="LAN CIDR").pack(side="left") - ctk.CTkEntry(top, textvariable=self.cidr, width=140).pack(side="left", padx=6) - ttk.Separator(top, orient="vertical").pack(side="left", fill="y", padx=10) - ctk.CTkCheckBox(top, text="Active discovery", variable=self.active_scan).pack(side="left", padx=6) - ctk.CTkCheckBox(top, text="Passive sniff", variable=self.passive_scan).pack(side="left", padx=6) - ctk.CTkLabel(top, text="Refresh (ms)").pack(side="left") - ctk.CTkEntry(top, textvariable=self.refresh_ms, width=80).pack(side="left", padx=6) - ctk.CTkButton(top, text="Scan now", command=self.scan_now, - fg_color="#3a7ebf", hover_color="#2b6194").pack(side="left", padx=6) - ctk.CTkButton(top, text="Export report", command=self.export_report).pack(side="left", padx=6) - ctk.CTkButton(top, text="Force stop", command=self.force_stop, - fg_color="#bf3a3a", hover_color="#942b2b").pack(side="left", padx=6) - ctk.CTkButton(top, text="⚙ API Keys", command=lambda: APIKeySettingsDialog(self.parent), - fg_color="#565b5e", hover_color="#6e7377", width=90).pack(side="left", padx=6) - - self.nb = ctk.CTkTabview(self) - nb = self.nb - nb.pack(fill="both", expand=True, padx=20, pady=(0, 20)) - - self.tab_dashboard = nb.add("Summary") - self.tab_devices = nb.add("Devices") - self.tab_connections = nb.add("Connections") - self.tab_history = 
nb.add("History") - self.tab_alerts = nb.add("Alerts") - self.tab_trust = nb.add("Trust list") - self.tab_threats = nb.add("Threats") - - self._build_dashboard() - self._build_devices() - self._build_connections() - self._build_history() - self._build_alerts() - self._build_trust() - self._build_threats() - - self.pack(fill="both", expand=True) - - # ── Dashboard tab ───────────────────────────────────────────────────────── - - def _build_dashboard(self): - box = ctk.CTkFrame(self.tab_dashboard, corner_radius=10) - box.pack(fill="both", expand=True, padx=8, pady=8) - self.live_text = ctk.CTkTextbox( - box, wrap="word", font=ctk.CTkFont(family="Consolas", size=13) - ) - self.live_text.pack(fill="both", expand=True, padx=10, pady=10) - self.live_text.configure(state="disabled") - self._update_live_text() - - def _update_live_text(self): - threat_level = self.mon.compute_threat_level() - susp_conns = sum( - 1 for c in self._last_conns if c.get("trust") in ("suspicious", "dangerous") - ) - lines = [ - f" Threat level : {threat_level}", - f" Suspicious connections : {susp_conns}", - f" Active connections : {len(self._last_conns)}", - f" Last scan : {self._last_scan_ts or '(pending)'}", - "", - "What matters most:", - " - New/untrusted device on LAN", - " - Gateway MAC changes vs baseline (possible MITM)", - " - IP->MAC mapping flips (strong ARP spoof indicator)", - " - Possible port scan (passive sniff)", - " - SUSPICIOUS / DANGEROUS outbound connections", - "", - "Noise reduction:", - " - Multicast/broadcast MACs filtered", - " - Alerts deduped + rate-limited", - "", - "Status:", - f" scapy={'YES' if HAS_SCAPY else 'NO'} | watchdog={'YES' if HAS_WATCHDOG else 'NO'} | admin={'YES' if is_admin_windows() else 'NO'}", - f" active={'ON' if self.active_scan.get() else 'OFF'} | passive={'ON' if self.passive_scan.get() else 'OFF'}", - f" Gateway baseline: {self.mon.baseline_gateway_ip or '(not set)'} / {self.mon.baseline_gateway_mac or '(not set)'}", - "", - "IP Reputation:", 
- f" VirusTotal: {'configured' if get_reputation_checker()._vt_key else 'no key'}" - f" | AbuseIPDB: {'configured' if get_reputation_checker()._abuse_key else 'no key'}", - f" IPs checked this session: {len(get_reputation_checker()._cache)}", - ] - self.live_text.configure(state="normal") - self.live_text.delete("1.0", "end") - self.live_text.insert("1.0", "\n".join(lines)) - self.live_text.configure(state="disabled") - - # ── Devices tab ─────────────────────────────────────────────────────────── - - def _build_devices(self): - f = self.tab_devices - top = ctk.CTkFrame(f) - top.pack(fill="x", padx=8, pady=6) - ctk.CTkButton(top, text="Set current gateway as baseline", - command=self.set_gateway_baseline).pack(side="left", padx=6) - self.dev_summary = ctk.CTkLabel(top, text="Devices: 0") - self.dev_summary.pack(side="right", padx=10) - - cols = ("trusted", "label", "mac", "ip", "last_seen") - self.dev_tree = ttk.Treeview(f, columns=cols, show="headings", height=22) - for c in cols: - self.dev_tree.heading(c, text=c.upper()) - w = 140 - if c == "mac": - w = 180 - if c == "ip": - w = 120 - if c == "label": - w = 220 - self.dev_tree.column(c, width=w, stretch=True) - self.dev_tree.pack(fill="both", expand=True, padx=8, pady=8) - - act = ctk.CTkFrame(f) - act.pack(fill="x", padx=8, pady=(0, 8)) - ctk.CTkButton(act, text="Trust selected", command=self.trust_selected).pack(side="left", padx=6) - ctk.CTkButton(act, text="Untrust selected", command=self.untrust_selected).pack(side="left", padx=6) - ctk.CTkButton(act, text="Copy selected", command=self.copy_device_selected).pack(side="left", padx=6) - - # ── Connections tab ─────────────────────────────────────────────────────── - - def _build_connections(self): - f = self.tab_connections - cols = ("trust", "rep", "process", "service", "laddr", "raddr", "country", "org", "status") - self.conn_tree = ttk.Treeview(f, columns=cols, show="headings", height=26) - headings = { - "trust": "TRUST", "rep": "REP", "process": 
"PROCESS", "service": "SERVICE", - "laddr": "LOCAL", "raddr": "REMOTE", - "country": "COUNTRY", "org": "ORGANIZATION", "status": "STATUS", - } - widths = { - "trust": 90, "rep": 40, "process": 140, "service": 110, - "laddr": 130, "raddr": 130, - "country": 90, "org": 170, "status": 90, - } - for c in cols: - self.conn_tree.heading(c, text=headings[c]) - self.conn_tree.column(c, width=widths[c], stretch=(c in ("org", "process"))) - self.conn_tree.pack(fill="both", expand=True, padx=8, pady=8) - - # Double-click → detail popup (also opens Explorer to executable location) - self.conn_tree.bind("", self._on_conn_double_click) - - filter_frame = ctk.CTkFrame(f) - filter_frame.pack(fill="x", padx=8, pady=(0, 8)) - ctk.CTkLabel(filter_frame, text="Filter:").pack(side="left", padx=(0, 6)) - self.conn_filter = tk.StringVar(value="ALL") - conn_combo = ttk.Combobox( - filter_frame, textvariable=self.conn_filter, width=18, - values=("ALL", "SAFE", "KNOWN", "UNKNOWN", "SUSPICIOUS", "DANGEROUS"), - state="readonly", - ) - conn_combo.pack(side="left", padx=6) - conn_combo.bind("<>", lambda _e: self.refresh_connections()) - ctk.CTkButton(filter_frame, text="Apply", command=self.refresh_connections).pack(side="left", padx=6) - ctk.CTkButton(filter_frame, text="Trust IP", command=self.trust_connection_ip).pack(side="left", padx=6) - ctk.CTkButton(filter_frame, text="Untrust IP", command=self.untrust_connection_ip).pack(side="left", padx=6) - ctk.CTkButton(filter_frame, text="Block IP (Firewall)", command=self.block_selected_ip, - fg_color="#bf6d3a", hover_color="#944e2b").pack(side="left", padx=6) - - def _on_conn_double_click(self, _event): - sel = self.conn_tree.selection() - if not sel: - return - iid = sel[0] - try: - idx = int(iid.split("-")[1]) - except Exception: - return - - # Resolve the index against the currently displayed (filtered) list - filter_val = self.conn_filter.get() - filtered = self._filtered_conns(filter_val) - if idx < 0 or idx >= len(filtered): - return - - 
conn = filtered[idx] - ConnectionDetailPopup(self.parent, conn, self.mon, self.refresh_connections) - - def _filtered_conns(self, filter_val: str) -> List[Dict]: - result = [] - for c in self._last_conns[:1200]: - trust = c.get("trust", "unknown") - if filter_val != "ALL" and trust.upper() != filter_val.upper(): - continue - result.append(c) - return result - - # ── History tab ─────────────────────────────────────────────────────────── - - def _build_history(self): - f = self.tab_history - - info = ctk.CTkLabel( - f, text="All unknown / suspicious / dangerous connections ever seen (persisted across sessions)", - text_color="gray", font=ctk.CTkFont(size=12)) - info.pack(anchor="w", padx=10, pady=(8, 2)) - - cols = ("trust", "rep", "remote_ip", "process", "service", "raddr", - "country", "org", "first_seen", "last_seen", "times_seen") - self.hist_tree = ttk.Treeview(f, columns=cols, show="headings", height=24) - headings = { - "trust": "TRUST", "rep": "REP", "remote_ip": "REMOTE IP", - "process": "PROCESS", "service": "SERVICE", "raddr": "REMOTE ADDR", - "country": "COUNTRY", "org": "ORGANIZATION", - "first_seen": "FIRST SEEN", "last_seen": "LAST SEEN", "times_seen": "#", - } - widths = { - "trust": 90, "rep": 40, "remote_ip": 130, "process": 120, - "service": 90, "raddr": 140, "country": 80, "org": 160, - "first_seen": 140, "last_seen": 140, "times_seen": 40, - } - for c in cols: - self.hist_tree.heading(c, text=headings[c]) - self.hist_tree.column(c, width=widths[c], stretch=(c in ("org", "process"))) - self.hist_tree.pack(fill="both", expand=True, padx=8, pady=(2, 4)) - - self.hist_tree.bind("", self._on_hist_double_click) - - # Filter + buttons - bar = ctk.CTkFrame(f) - bar.pack(fill="x", padx=8, pady=(0, 8)) - ctk.CTkLabel(bar, text="Filter:").pack(side="left", padx=(0, 6)) - self.hist_filter = tk.StringVar(value="ALL") - hist_combo = ttk.Combobox( - bar, textvariable=self.hist_filter, width=18, - values=("ALL", "UNKNOWN", "SUSPICIOUS", "DANGEROUS"), - 
state="readonly", - ) - hist_combo.pack(side="left", padx=6) - hist_combo.bind("<>", lambda _e: self.refresh_history()) - ctk.CTkButton(bar, text="Refresh", command=self.refresh_history, width=80).pack(side="left", padx=6) - ctk.CTkButton(bar, text="Clear All", command=self._clear_history, width=80, - fg_color="#bf3a3a", hover_color="#942b2b").pack(side="right", padx=6) - - def _on_hist_double_click(self, event): - sel = self.hist_tree.selection() - if not sel: - return - iid = sel[0] - ip = self.hist_tree.set(iid, "remote_ip") - # Find matching entry in history and build a fake conn dict for the detail popup - entry = self._conn_history.get(ip) - if not entry: - return - conn_data = { - "remote_ip": ip, - "remote_port": entry.get("remote_port", 0), - "trust": entry.get("trust", "unknown"), - "rep": entry.get("rep", ""), - "process": entry.get("process", "?"), - "exe": entry.get("exe", ""), - "service": entry.get("service", ""), - "service_desc": entry.get("service_desc", ""), - "laddr": entry.get("laddr", ""), - "raddr": entry.get("raddr", ""), - "country": entry.get("country", ""), - "org": entry.get("org", ""), - "status": entry.get("status", ""), - "pid": 0, - } - ConnectionDetailPopup(self.parent, conn_data, self.mon, self.refresh_connections) - - # ── Alerts tab ──────────────────────────────────────────────────────────── - - def _build_alerts(self): - f = self.tab_alerts - bar = ctk.CTkFrame(f) - bar.pack(fill="x", padx=8, pady=(8, 0)) - - self.alert_filter_cat = tk.StringVar(value="ALL") - self.alert_filter_sev = tk.StringVar(value="ALL") - - ctk.CTkLabel(bar, text="Category").pack(side="left") - ttk.Combobox( - bar, textvariable=self.alert_filter_cat, width=10, - values=("ALL", "DEVICE", "MITM", "SCAN", "DNS", "OUTBOUND", "SYSTEM"), - state="readonly", - ).pack(side="left", padx=6) - ctk.CTkLabel(bar, text="Severity").pack(side="left") - ttk.Combobox( - bar, textvariable=self.alert_filter_sev, width=8, - values=("ALL", "INFO", "WARN", "HIGH"), - 
state="readonly", - ).pack(side="left", padx=6) - ctk.CTkButton(bar, text="Apply", command=self.refresh_alerts).pack(side="left", padx=6) - ctk.CTkButton(bar, text="Clear alerts", command=self.clear_alerts).pack(side="left", padx=6) - - cols = ("time", "severity", "category", "title") - self.alert_tree = ttk.Treeview(f, columns=cols, show="headings", height=20) - for c in cols: - self.alert_tree.heading(c, text=c.upper()) - w = 760 if c == "title" else 180 - self.alert_tree.column(c, width=w, stretch=(c == "title")) - self.alert_tree.pack(fill="both", expand=True, padx=8, pady=8) - self.alert_tree.bind("<>", lambda _e: self.show_alert_details()) - - box = ctk.CTkFrame(f, corner_radius=10) - box.pack(fill="both", expand=True, padx=8, pady=(0, 8)) - self.alert_details = ctk.CTkTextbox( - box, height=150, wrap="word", font=ctk.CTkFont(family="Consolas", size=13) - ) - self.alert_details.pack(fill="both", expand=True, padx=10, pady=10) - self.alert_details.configure(state="disabled") - - # ── Trust list tab ──────────────────────────────────────────────────────── - - def _build_trust(self): - f = self.tab_trust - - # ── Trusted Devices (MAC) ── - ctk.CTkLabel(f, text="Trusted Devices (MAC)", - font=ctk.CTkFont(size=13, weight="bold")).pack(anchor="w", padx=10, pady=(8, 2)) - cols = ("mac", "label", "first_seen") - self.trust_tree = ttk.Treeview(f, columns=cols, show="headings", height=10) - for c in cols: - self.trust_tree.heading(c, text=c.upper()) - w = 280 if c == "label" else 220 - self.trust_tree.column(c, width=w, stretch=True) - self.trust_tree.pack(fill="both", expand=True, padx=8, pady=(2, 4)) - - row = ctk.CTkFrame(f) - row.pack(fill="x", padx=8, pady=(0, 6)) - ctk.CTkButton(row, text="Remove selected", command=self.remove_trust_selected).pack(side="left", padx=6) - - # ── Trusted IPs (connections) ── - ctk.CTkLabel(f, text="Trusted IPs (Connections)", - font=ctk.CTkFont(size=13, weight="bold")).pack(anchor="w", padx=10, pady=(6, 2)) - ip_cols = ("ip", 
"label", "first_seen", "notes") - self.trust_ip_tree = ttk.Treeview(f, columns=ip_cols, show="headings", height=10) - for c in ip_cols: - self.trust_ip_tree.heading(c, text=c.upper()) - w = 300 if c == "notes" else 200 if c == "label" else 160 - self.trust_ip_tree.column(c, width=w, stretch=(c == "notes")) - self.trust_ip_tree.pack(fill="both", expand=True, padx=8, pady=(2, 4)) - - row2 = ctk.CTkFrame(f) - row2.pack(fill="x", padx=8, pady=(0, 8)) - ctk.CTkButton(row2, text="Remove selected IP", command=self._remove_trust_ip_selected).pack(side="left", padx=6) - ctk.CTkButton(row2, text="Refresh", command=self.refresh_trust).pack(side="right", padx=6) - - # ── Threats tab ─────────────────────────────────────────────────────────── - - def _build_threats(self): - f = self.tab_threats - - # Threat gauge row - gauge_frame = ctk.CTkFrame(f, corner_radius=8) - gauge_frame.pack(fill="x", padx=8, pady=(10, 4)) - - ctk.CTkLabel(gauge_frame, text="Overall Threat Level:", - font=ctk.CTkFont(size=13, weight="bold")).pack(side="left", padx=12, pady=8) - self.threat_level_label = ctk.CTkLabel( - gauge_frame, text="LOW", - font=ctk.CTkFont(size=16, weight="bold"), - text_color="#90EE90", - ) - self.threat_level_label.pack(side="left", padx=8, pady=8) - - # Stats row - stats_frame = ctk.CTkFrame(f, fg_color="transparent") - stats_frame.pack(fill="x", padx=8, pady=2) - self.threat_stats_label = ctk.CTkLabel(stats_frame, text="", text_color="gray") - self.threat_stats_label.pack(side="left", padx=12) - - # Active threats list - cols = ("time", "severity", "category", "title") - self.threat_tree = ttk.Treeview(f, columns=cols, show="headings", height=16) - for c in cols: - self.threat_tree.heading(c, text=c.upper()) - w = 600 if c == "title" else 150 - self.threat_tree.column(c, width=w, stretch=(c == "title")) - self.threat_tree.pack(fill="both", expand=True, padx=8, pady=8) - - # Action buttons - act = ctk.CTkFrame(f, fg_color="transparent") - act.pack(fill="x", padx=8, pady=(0, 
8)) - ctk.CTkButton(act, text="Kill Process", - command=self.threats_kill_process, - fg_color="#bf3a3a", hover_color="#942b2b").pack(side="left", padx=6) - ctk.CTkButton(act, text="Block IP (Firewall)", - command=self.threats_block_ip, - fg_color="#bf6d3a", hover_color="#944e2b").pack(side="left", padx=6) - ctk.CTkButton(act, text="Investigate", - command=self.threats_investigate).pack(side="left", padx=6) - ctk.CTkButton(act, text="Refresh", - command=self.refresh_threats).pack(side="right", padx=6) - - # ───────────────────────────────────────────────────────────────────────── - # Actions - # ───────────────────────────────────────────────────────────────────────── - - def scan_now(self): - try: - self.worker_q.put_nowait("scan") - except Exception: - pass - - def force_stop(self): - self.running = False - self.mon.stop_passive_sniff() - try: - self.parent.destroy() - except Exception: - pass - - def export_report(self): - path = filedialog.asksaveasfilename( - defaultextension=".json", - filetypes=[("JSON report", "*.json")], - title="Save network report", - ) - if not path: - return - report = { - "generated_at": now_ts(), - "admin": is_admin_windows(), - "scapy_available": HAS_SCAPY, - "cidr": self.cidr.get(), - "gateway": self.gateway.get(), - "local_ip": self.localip.get(), - "trusted": self.mon.trusted, - "known_devices": self.mon.known_devices, - "gateway_baseline": { - "ip": self.mon.baseline_gateway_ip, - "mac": self.mon.baseline_gateway_mac, - }, - "alerts": self.mon.alerts, - "connections_snapshot": self._last_conns, - "threat_level": self.mon.compute_threat_level(), - } - try: - with open(path, "w", encoding="utf-8") as f: - json.dump(report, f, indent=2, ensure_ascii=False) - messagebox.showinfo("Export", f"Saved:\n{path}") - except Exception as e: - messagebox.showerror("Export failed", str(e)) - - # ── Connection History persistence ────────────────────────────────────── - - def _load_history(self): - try: - if os.path.exists(self._history_path): - 
with open(self._history_path, "r", encoding="utf-8") as f: - self._conn_history = json.load(f) - except Exception: - self._conn_history = {} - - def _save_history(self): - try: - with open(self._history_path, "w", encoding="utf-8") as f: - json.dump(self._conn_history, f, indent=2, ensure_ascii=False) - except Exception: - pass - - def _update_history(self, conns: List[Dict]): - """Add unknown/suspicious/dangerous connections to persistent history.""" - dominated = {"unknown", "suspicious", "dangerous"} - changed = False - for c in conns: - trust = c.get("trust", "unknown") - if trust not in dominated: - continue - ip = c.get("remote_ip", "") - if not ip: - continue - if ip in self._conn_history: - entry = self._conn_history[ip] - entry["last_seen"] = now_ts() - entry["times_seen"] = entry.get("times_seen", 1) + 1 - # upgrade severity level if worse - order = {"unknown": 0, "suspicious": 1, "dangerous": 2} - if order.get(trust, 0) > order.get(entry.get("trust", "unknown"), 0): - entry["trust"] = trust - # update other fields - entry["process"] = c.get("process", entry.get("process", "?")) - entry["service"] = c.get("service", entry.get("service", "")) - entry["country"] = c.get("country", entry.get("country", "")) - entry["org"] = c.get("org", entry.get("org", "")) - entry["rep"] = c.get("rep", entry.get("rep", "")) - entry["raddr"] = c.get("raddr", entry.get("raddr", "")) - entry["laddr"] = c.get("laddr", entry.get("laddr", "")) - entry["status"] = c.get("status", entry.get("status", "")) - changed = True - else: - self._conn_history[ip] = { - "remote_ip": ip, - "trust": trust, - "process": c.get("process", "?"), - "service": c.get("service", ""), - "laddr": c.get("laddr", ""), - "raddr": c.get("raddr", ""), - "country": c.get("country", ""), - "org": c.get("org", ""), - "rep": c.get("rep", ""), - "status": c.get("status", ""), - "first_seen": now_ts(), - "last_seen": now_ts(), - "times_seen": 1, - "remote_port": c.get("remote_port", 0), - "exe": c.get("exe", ""), 
- } - changed = True - if changed: - self._save_history() - - def _clear_history(self): - if not messagebox.askyesno("Clear History", - "Delete all connection history entries?", - parent=self.parent): - return - self._conn_history.clear() - self._save_history() - self.refresh_history() - - def set_gateway_baseline(self): - gw = self.gateway.get().strip() - if not gw: - messagebox.showerror("Gateway baseline", "Gateway IP is empty.") - return - arps = arp_table() - mac = arps.get(gw, "") - if not mac: - messagebox.showerror("Gateway baseline", - "Could not find gateway MAC in ARP table.\nTry Scan now and retry.") - return - self.mon.set_gateway_baseline(gw, mac) - self._update_live_text() - - def trust_selected(self): - sel = self.dev_tree.selection() - if not sel: - return - iid = sel[0] - mac = self.dev_tree.set(iid, "mac").lower() - label = self.dev_tree.set(iid, "label") or "Trusted" - label = simple_prompt(self.parent, "Trust device", "Label for this device:", default=label) - if label is None: - return - self.mon.trust_mac(mac, label) - self.mon.log("INFO", "DEVICE", "Device marked as trusted", {"mac": mac, "label": label}) - self.refresh_all() - - def untrust_selected(self): - sel = self.dev_tree.selection() - if not sel: - return - iid = sel[0] - mac = self.dev_tree.set(iid, "mac").lower() - self.mon.untrust_mac(mac) - self.mon.log("INFO", "DEVICE", "Device removed from trust list", {"mac": mac}) - self.refresh_all() - - def copy_device_selected(self): - sel = self.dev_tree.selection() - if not sel: - return - iid = sel[0] - row = {c: self.dev_tree.set(iid, c) for c in ("trusted", "label", "mac", "ip", "last_seen")} - text = json.dumps(row, indent=2, ensure_ascii=False) - self.parent.clipboard_clear() - self.parent.clipboard_append(text) - self.mon.log("INFO", "SYSTEM", "Copied device to clipboard", row) - - def remove_trust_selected(self): - sel = self.trust_tree.selection() - if not sel: - return - iid = sel[0] - mac = self.trust_tree.set(iid, 
"mac").lower() - self.mon.untrust_mac(mac) - self.mon.log("INFO", "DEVICE", "Trust entry removed", {"mac": mac}) - self.refresh_all() - - def _remove_trust_ip_selected(self): - sel = self.trust_ip_tree.selection() - if not sel: - return - iid = sel[0] - ip = self.trust_ip_tree.set(iid, "ip") - self.mon.conn_trust.untrust_ip(ip) - self.mon.log("INFO", "CONNECTION", "Trusted IP removed", {"ip": ip}) - self.refresh_trust() - - def clear_alerts(self): - self.mon.alerts.clear() - self.mon._alert_index.clear() - self.mon._alert_last_seen.clear() - self.mon._rate_limit.clear() - for i in self.alert_tree.get_children(): - self.alert_tree.delete(i) - self.alert_details.configure(state="normal") - self.alert_details.delete("1.0", "end") - self.alert_details.configure(state="disabled") - - def alert_passes_filter(self, a: Dict) -> bool: - cat = self.alert_filter_cat.get() - sev = self.alert_filter_sev.get() - if cat != "ALL" and a.get("category") != cat: - return False - if sev != "ALL" and a.get("severity") != sev: - return False - return True - - # Connections actions ────────────────────────────────────────────────────── - - def _selected_conn(self) -> Optional[Dict]: - sel = self.conn_tree.selection() - if not sel: - return None - try: - idx = int(sel[0].split("-")[1]) - except Exception: - return None - filtered = self._filtered_conns(self.conn_filter.get()) - if idx < len(filtered): - return filtered[idx] - return None - - def trust_connection_ip(self): - conn = self._selected_conn() - if not conn: - messagebox.showinfo("Trust IP", "Please select a connection first") - return - ip = conn.get("remote_ip", "") - if ip: - self.mon.conn_trust.trust_ip(ip, "Manually trusted connection") - messagebox.showinfo("Trust IP", f"IP {ip} added to trust list") - self.refresh_connections() - - def untrust_connection_ip(self): - conn = self._selected_conn() - if not conn: - messagebox.showinfo("Untrust IP", "Please select a connection first") - return - ip = conn.get("remote_ip", "") - 
if ip: - self.mon.conn_trust.untrust_ip(ip) - messagebox.showinfo("Untrust IP", f"IP {ip} removed from trust list") - self.refresh_connections() - - def block_selected_ip(self): - conn = self._selected_conn() - if not conn: - messagebox.showinfo("Block IP", "Please select a connection first") - return - ip = conn.get("remote_ip", "") - if not ip: - return - if messagebox.askyesno("Block IP", f"Add Windows Firewall outbound block rule for {ip}?"): - ok = block_ip_firewall(ip) - if ok: - messagebox.showinfo("Block IP", f"Outbound block rule created for {ip}.") - self.mon.log("HIGH", "SYSTEM", "IP blocked via firewall", - {"ip": ip, "process": conn.get("process")}) - else: - messagebox.showerror("Block IP", - "netsh failed. Run as Administrator for firewall access.") - - # Threats tab actions ───────────────────────────────────────────────────── - - def _selected_threat_alert(self) -> Optional[Dict]: - sel = self.threat_tree.selection() - if not sel: - return None - try: - idx = int(sel[0].split("-")[1]) - except Exception: - return None - threats = self.mon.get_active_threats() - if idx < len(threats): - return threats[idx] - return None - - def threats_kill_process(self): - alert = self._selected_threat_alert() - if not alert: - messagebox.showinfo("Kill Process", "Select a threat first") - return - details = alert.get("details", {}) - proc_name = details.get("process", "") - pid_val = details.get("pid", 0) - - if not pid_val: - messagebox.showinfo("Kill Process", "No PID in this alert.") - return - try: - pid_val = int(pid_val) - except Exception: - messagebox.showinfo("Kill Process", "Invalid PID.") - return - - if messagebox.askyesno("Kill Process", - f"Terminate {proc_name or 'process'} (PID {pid_val})?"): - try: - psutil.Process(pid_val).kill() - messagebox.showinfo("Kill Process", f"PID {pid_val} terminated.") - self.mon.log("HIGH", "SYSTEM", "Process killed via Threats tab", - {"process": proc_name, "pid": pid_val}) - self.refresh_threats() - except Exception 
as e: - messagebox.showerror("Kill Process", str(e)) - - def threats_block_ip(self): - alert = self._selected_threat_alert() - if not alert: - messagebox.showinfo("Block IP", "Select a threat first") - return - details = alert.get("details", {}) - ip = details.get("remote_ip", details.get("ip", "")) - if not ip: - messagebox.showinfo("Block IP", "No IP address found in this alert.") - return - if messagebox.askyesno("Block IP", f"Add Windows Firewall outbound block for {ip}?"): - ok = block_ip_firewall(ip) - if ok: - messagebox.showinfo("Block IP", f"Block rule created for {ip}.") - self.mon.log("HIGH", "SYSTEM", "IP blocked via Threats tab", {"ip": ip}) - self.refresh_threats() - else: - messagebox.showerror("Block IP", - "netsh failed. Run as Administrator for firewall access.") - - def threats_investigate(self): - alert = self._selected_threat_alert() - if not alert: - messagebox.showinfo("Investigate", "Select a threat first") - return - text = json.dumps(alert, indent=2, ensure_ascii=False) - win = ctk.CTkToplevel(self.parent) - win.title("Threat Investigation") - win.geometry("600x400") - tb = ctk.CTkTextbox(win, wrap="word", font=ctk.CTkFont(family="Consolas", size=12)) - tb.pack(fill="both", expand=True, padx=8, pady=8) - tb.insert("1.0", text) - tb.configure(state="disabled") - ctk.CTkButton(win, text="Close", command=win.destroy).pack(pady=8) - - # ───────────────────────────────────────────────────────────────────────── - # Background worker - # ───────────────────────────────────────────────────────────────────────── - - def _worker_loop(self): - while self.running: - try: - job = self.worker_q.get(timeout=0.2) - except queue.Empty: - continue - - if job == "scan": - try: - cidr = self.cidr.get().strip() or None - gw = self.gateway.get().strip() or None - active = bool(self.active_scan.get()) - - mac_ip = self.mon.discover_devices(cidr, active=active) - self.mon.analyze_devices(mac_ip, gw_ip=gw) - - conns = self.mon.snapshot_outbound() - 
self._last_conns = conns - self._last_scan_ts = now_ts() - - # Accumulate unknown/suspicious/dangerous to persistent history - self._update_history(conns) - - if len(conns) > 80: - self.mon.log("INFO", "OUTBOUND", "High number of active connections", - {"count": len(conns)}) - - # Advanced threat scans (rate-limited internally) - self.mon._detect_advanced_threats(conns) - - # Background reputation checks (hybrid: unknown/suspicious/dangerous) - self.mon.check_reputations_background(conns) - - except Exception as e: - self.mon.log("WARN", "SYSTEM", "Scan error", {"error": str(e)}) - - def _ensure_passive(self): - if self.passive_scan.get(): - self.mon.start_passive_sniff() - - def _ui_tick(self): - if self.running: - self.scan_now() - self.refresh_all() - self._update_live_text() - try: - interval = max(1000, int(self.refresh_ms.get())) - except ValueError: - interval = 3500 - self.after(interval, self._ui_tick) - - # ───────────────────────────────────────────────────────────────────────── - # View refresh - # ───────────────────────────────────────────────────────────────────────── - - def refresh_all(self, force: bool = False): - # Always refresh the summary dashboard - self._update_live_text() - - if force: - self.refresh_devices() - self.refresh_connections() - self.refresh_history() - self.refresh_alerts() - self.refresh_trust() - self.refresh_threats() - return - - # Only refresh the currently visible tab for performance - active = self.nb.get() - if active == "Devices": - self.refresh_devices() - elif active == "Connections": - self.refresh_connections() - elif active == "History": - self.refresh_history() - elif active == "Alerts": - self.refresh_alerts() - elif active == "Trust list": - self.refresh_trust() - elif active == "Threats": - self.refresh_threats() - - def refresh_devices(self): - for i in self.dev_tree.get_children(): - self.dev_tree.delete(i) - items = [ - (mac, info) - for mac, info in (self.mon.known_devices or {}).items() - if not 
is_noisy_mac(mac) - ] - items.sort( - key=lambda x: (1 if x[0] in self.mon.trusted else 0, x[1].get("last_seen", "")), - reverse=True, - ) - for mac, info in items[:900]: - trusted = "YES" if mac in self.mon.trusted else "" - label = self.mon.trusted.get(mac, {}).get("label", "") if mac in self.mon.trusted else "" - self.dev_tree.insert("", "end", iid=f"dev-{mac}", - values=(trusted, label, mac, info.get("last_ip", ""), - info.get("last_seen", ""))) - self.dev_summary.configure( - text=f"Devices: {len(items)} (Trusted: {len(self.mon.trusted)})" - ) - - def refresh_connections(self): - for i in self.conn_tree.get_children(): - self.conn_tree.delete(i) - filter_val = self.conn_filter.get() - filtered = self._filtered_conns(filter_val) - for idx, c in enumerate(filtered[:800]): - trust = c.get("trust", "unknown") - org = c.get("org", "") - if len(org) > 35: - org = org[:32] + "..." - iid = f"c-{idx}" - rep = c.get("rep", REP_UNCHECKED) - self.conn_tree.insert( - "", "end", iid=iid, - values=( - trust.upper(), - rep, - c.get("process", "?"), - c.get("service", ""), - c.get("laddr", ""), - c.get("raddr", ""), - c.get("country", ""), - org, - c.get("status", ""), - ), - tags=(trust,), - ) - self.conn_tree.tag_configure(trust, foreground=TRUST_COLORS.get(trust, "white")) - - def refresh_alerts(self): - for i in self.alert_tree.get_children(): - self.alert_tree.delete(i) - recent = list(reversed(self.mon.alerts[-800:])) - idx = 0 - for a in recent: - if not self.alert_passes_filter(a): - continue - title = a.get("title", "") - cnt = int(a.get("details", {}).get("_count", 1)) - if cnt > 1: - title = f"{title} (x{cnt})" - self.alert_tree.insert("", "end", iid=f"a-{idx}", - values=(a.get("timestamp", ""), a.get("severity", ""), - a.get("category", ""), title)) - idx += 1 - - def show_alert_details(self): - sel = self.alert_tree.selection() - if not sel: - return - try: - idx = int(sel[0].split("-")[1]) - except Exception: - return - recent = 
list(reversed(self.mon.alerts[-800:])) - filtered = [a for a in recent if self.alert_passes_filter(a)] - if idx < 0 or idx >= len(filtered): - return - text = json.dumps(filtered[idx], indent=2, ensure_ascii=False) - self.alert_details.configure(state="normal") - self.alert_details.delete("1.0", "end") - self.alert_details.insert("1.0", text) - self.alert_details.configure(state="disabled") - - def refresh_history(self): - for i in self.hist_tree.get_children(): - self.hist_tree.delete(i) - filt = self.hist_filter.get().lower() - trust_order = {"dangerous": 0, "suspicious": 1, "unknown": 2} - entries = sorted( - self._conn_history.values(), - key=lambda e: (trust_order.get(e.get("trust", "unknown"), 2), e.get("last_seen", "")), - ) - for idx, e in enumerate(entries): - trust = e.get("trust", "unknown") - if filt != "all" and trust != filt: - continue - org = e.get("org", "") - if len(org) > 35: - org = org[:32] + "..." - iid = f"h-{idx}" - self.hist_tree.insert( - "", "end", iid=iid, - values=( - trust.upper(), - e.get("rep", ""), - e.get("remote_ip", ""), - e.get("process", "?"), - e.get("service", ""), - e.get("raddr", ""), - e.get("country", ""), - org, - e.get("first_seen", ""), - e.get("last_seen", ""), - e.get("times_seen", 1), - ), - tags=(trust,), - ) - self.hist_tree.tag_configure(trust, foreground=TRUST_COLORS.get(trust, "white")) - - def refresh_trust(self): - # Device trust (MAC) - for i in self.trust_tree.get_children(): - self.trust_tree.delete(i) - for mac, info in sorted(self.mon.trusted.items()): - self.trust_tree.insert("", "end", iid=f"t-{mac}", - values=(mac, info.get("label", ""), info.get("first_seen", ""))) - # Connection trust (IP) - for i in self.trust_ip_tree.get_children(): - self.trust_ip_tree.delete(i) - for ip, info in sorted(self.mon.conn_trust.trusted_ips.items()): - self.trust_ip_tree.insert("", "end", iid=f"tip-{ip}", - values=(ip, info.get("label", ""), - info.get("first_seen", ""), - info.get("notes", ""))) - - def 
refresh_threats(self): - for i in self.threat_tree.get_children(): - self.threat_tree.delete(i) - - level = self.mon.compute_threat_level() - color = THREAT_LEVEL_COLORS.get(level, "white") - self.threat_level_label.configure(text=level, text_color=color) - - susp = sum(1 for c in self._last_conns if c.get("trust") in ("suspicious", "dangerous")) - high_alerts = sum(1 for a in self.mon.alerts[-200:] if a.get("severity") == "HIGH") - self.threat_stats_label.configure( - text=f"Suspicious/Dangerous connections: {susp} | " - f"HIGH alerts (recent): {high_alerts} | " - f"Last scan: {self._last_scan_ts or '(pending)'}" - ) - - threats = self.mon.get_active_threats() - for idx, a in enumerate(threats[:200]): - self.threat_tree.insert( - "", "end", iid=f"th-{idx}", - values=(a.get("timestamp", ""), a.get("severity", ""), - a.get("category", ""), a.get("title", "")), - tags=(a.get("severity", ""),), - ) - self.threat_tree.tag_configure("HIGH", foreground="#FF4444") - self.threat_tree.tag_configure("WARN", foreground="#FFA500") - self.threat_tree.tag_configure("INFO", foreground="#90EE90") - - -# ───────────────────────────────────────────────────────────────────────────── -# Toolbox entry point -# ───────────────────────────────────────────────────────────────────────────── - -def run_tool(): - try: - if tk._default_root is None: - root = ctk.CTkToplevel() - root.withdraw() - app = App(root) - root.protocol("WM_DELETE_WINDOW", app.force_stop) - root.mainloop() - else: - win = ctk.CTkToplevel(tk._default_root) - app = App(win) - win.protocol("WM_DELETE_WINDOW", app.force_stop) - except Exception as e: - try: - messagebox.showerror("Network Intrusion Detector Pro", f"Startup error:\n{e}") - except Exception: - pass - - -if __name__ == "__main__": - run_tool() diff --git a/portable/security_audit.py b/portable/security_audit.py deleted file mode 100644 index 01f9c5c..0000000 --- a/portable/security_audit.py +++ /dev/null @@ -1,1836 +0,0 @@ -""" -Security Audit Tool — 
Comprehensive device & network security checker. -Scans 10 categories: Startup, Processes, Ports/Firewall, Filesystem, -DNS/Network, Accounts, Wi-Fi, USB/Hardware, Browser, Event Logs. -""" - -import os, sys, re, json, csv, io, time, hashlib, socket, threading, queue, subprocess -from datetime import datetime, timedelta -from dataclasses import dataclass, field, asdict -from typing import List, Dict, Tuple, Optional, Any -from concurrent.futures import ThreadPoolExecutor, as_completed -from pathlib import Path - -try: - import winreg -except ImportError: - winreg = None - -try: - import psutil -except ImportError: - psutil = None - -try: - import customtkinter as ctk - from tkinter import ttk - import tkinter as tk - HAS_CTK = True -except ImportError: - HAS_CTK = False - import tkinter as tk - from tkinter import ttk - -TOOL_NAME = "Security Audit" -TOOL_DESCRIPTION = "Comprehensive security audit — checks startup, processes, ports, files, DNS, accounts, Wi-Fi, USB, browser, event logs" - -_CREATE_NO_WINDOW = 0x08000000 - -# ─────────────────────────── helpers ─────────────────────────── - -def now_ts() -> str: - return datetime.now().strftime("%Y-%m-%d %H:%M:%S") - -def safe_run(cmd: List[str], timeout: int = 30) -> Tuple[int, str, str]: - try: - cp = subprocess.run(cmd, capture_output=True, text=True, errors="replace", - timeout=timeout, shell=False, - creationflags=_CREATE_NO_WINDOW) - return cp.returncode, cp.stdout, cp.stderr - except subprocess.TimeoutExpired: - return 1, "", "timeout" - except Exception as e: - return 1, "", str(e) - -def is_admin() -> bool: - try: - import ctypes - return ctypes.windll.shell32.IsUserAnAdmin() != 0 - except Exception: - return False - -def _age_days(path: str) -> float: - try: - return (time.time() - os.path.getmtime(path)) / 86400 - except Exception: - return 9999 - -SAFE_PROCESS_PATHS = [ - "\\microsoft\\", "\\windows defender\\", "\\windows\\system32\\", - "\\program files\\", "\\program files (x86)\\", -] - -def 
_is_suspicious_path(p: str) -> bool: - if not p: - return False - low = p.lower() - # Known safe paths - if any(s in low for s in SAFE_PROCESS_PATHS): - return False - suspicious = ["\\temp\\", "\\tmp\\", "\\downloads\\", "\\appdata\\local\\temp\\", - "\\public\\"] - return any(s in low for s in suspicious) - -# ─────────────────────────── Finding ─────────────────────────── - -@dataclass -class Finding: - category: str - severity: str # INFO, WARN, CRITICAL - title: str - detail: str = "" - remediation: str = "" - raw_data: dict = field(default_factory=dict) - - def key(self) -> str: - data = f"{self.category}|{self.title}|{self.detail[:120]}" - return hashlib.sha256(data.encode()).hexdigest()[:16] - -# ─────────────────────────── Constants ─────────────────────────── - -CATEGORIES = [ - ("Startup & Persistence", "startup", "🔄"), - ("Process Analysis", "processes", "⚙️"), - ("Ports & Firewall", "ports", "🛡️"), - ("File System", "filesystem","📁"), - ("DNS & Network", "dns", "🌐"), - ("Account Security", "accounts", "👤"), - ("Wi-Fi Security", "wifi", "📶"), - ("USB & Hardware", "usb", "🔌"), - ("Browser Security", "browser", "🌍"), - ("Event Logs", "eventlogs", "📋"), -] - -SYSTEM_PROCESS_PATHS = { - "svchost.exe": r"c:\windows\system32\svchost.exe", - "csrss.exe": r"c:\windows\system32\csrss.exe", - "lsass.exe": r"c:\windows\system32\lsass.exe", - "services.exe": r"c:\windows\system32\services.exe", - "smss.exe": r"c:\windows\system32\smss.exe", - "wininit.exe": r"c:\windows\system32\wininit.exe", - "winlogon.exe": r"c:\windows\system32\winlogon.exe", - "explorer.exe": r"c:\windows\explorer.exe", - "dwm.exe": r"c:\windows\system32\dwm.exe", - "taskhostw.exe": r"c:\windows\system32\taskhostw.exe", - "runtimebroker.exe": r"c:\windows\system32\runtimebroker.exe", - "conhost.exe": r"c:\windows\system32\conhost.exe", - "spoolsv.exe": r"c:\windows\system32\spoolsv.exe", - "dllhost.exe": r"c:\windows\system32\dllhost.exe", -} - -KNOWN_DNS = { - "8.8.8.8", "8.8.4.4", # Google - 
"1.1.1.1", "1.0.0.1", # Cloudflare - "9.9.9.9", "149.112.112.112", # Quad9 - "208.67.222.222", "208.67.220.220", # OpenDNS - "76.76.2.0", "76.76.10.0", # Control D - "94.140.14.14", "94.140.15.15", # AdGuard -} - -BACKDOOR_PORTS = {4444, 5555, 31337, 12345, 6666, 6667, 1337, 9999, 8888} - -SUSPICIOUS_LISTEN_PORTS = { - 3389: "RDP (Remote Desktop)", - 5900: "VNC", - 5800: "VNC HTTP", - 22: "SSH", - 23: "Telnet", - 445: "SMB (check if expected)", - 135: "RPC", - 139: "NetBIOS", -} - -HACKING_USB_KEYWORDS = [ - "rubber ducky", "bash bunny", "flipper", "hak5", "badusb", - "o.mg", "lan turtle", "wifi pineapple", "usb armory", - "teensy", "digispark", "attiny85", -] - -KNOWN_ROOT_CA_KEYWORDS = [ - "digicert", "globalsign", "comodo", "sectigo", "godaddy", "go daddy", - "entrust", "verisign", "thawte", "geotrust", "rapidssl", - "starfield", "amazon", "microsoft", "apple", "google", - "isrg", "let's encrypt", "usertrust", "baltimore", - "identrust", "certsign", "actalis", "buypass", "certum", - "dfn-verein", "hellenic", "secom", "trust", "root", - "equifax", "swisssign", "ec-acc", "catalanes", - "quovadis", "networksolutions", "keynectis", "state of", - "d-trust", "t-telesec", "chambers of commerce", - "blizzard", "battle.net", # Gaming (Blizzard installs local cert for Battle.net) - "valve", "steam", # Gaming - "nvidia", "amd", "intel", # Hardware vendors - "cisco", "fortinet", "zscaler",# Enterprise security - "symantec", "norton", # Security vendors -] - -# ─────────────────────────── Engine ─────────────────────────── - -class SecurityAuditEngine: - def __init__(self, state_path: str): - self.state_path = state_path - self.state = self._load_state() - self._progress_cb = None - - def _load_state(self) -> dict: - try: - with open(self.state_path, "r") as f: - return json.load(f) - except Exception: - return {"baseline": None, "last_scan": None, "last_findings": []} - - def save_state(self): - try: - with open(self.state_path, "w") as f: - json.dump(self.state, f, 
indent=2, default=str) - except Exception: - pass - - def save_baseline(self, findings: List[Finding]): - self.state["baseline"] = { - "saved_at": now_ts(), - "finding_keys": [f.key() for f in findings], - "findings": [asdict(f) for f in findings], - } - self.save_state() - - def clear_baseline(self): - self.state["baseline"] = None - self.save_state() - - def get_baseline_keys(self) -> set: - bl = self.state.get("baseline") - if not bl: - return set() - return set(bl.get("finding_keys", [])) - - def get_checks(self): - return [ - ("startup", self.check_startup), - ("processes", self.check_processes), - ("ports", self.check_ports_firewall), - ("filesystem", self.check_filesystem), - ("dns", self.check_dns_network), - ("accounts", self.check_accounts), - ("wifi", self.check_wifi), - ("usb", self.check_usb_hardware), - ("browser", self.check_browser), - ("eventlogs", self.check_event_logs), - ] - - # ────────── 1. Startup & Persistence ────────── - - def check_startup(self) -> List[Finding]: - findings = [] - if not winreg: - findings.append(Finding("startup", "INFO", "Registry not available", - "winreg module not found (non-Windows)")) - return findings - - # Registry Run keys — only report suspicious, summarize rest - run_keys = [ - (winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run", "HKLM\\Run"), - (winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\RunOnce", "HKLM\\RunOnce"), - (winreg.HKEY_CURRENT_USER, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run", "HKCU\\Run"), - (winreg.HKEY_CURRENT_USER, r"SOFTWARE\Microsoft\Windows\CurrentVersion\RunOnce", "HKCU\\RunOnce"), - (winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\WOW6432Node\Microsoft\Windows\CurrentVersion\Run", "HKLM\\Run(32)"), - ] - - reg_total = 0 - reg_suspicious = [] - reg_names = [] - for hive, path, label in run_keys: - try: - key = winreg.OpenKey(hive, path, 0, winreg.KEY_READ) - i = 0 - while True: - try: - name, value, _ = winreg.EnumValue(key, i) - exe_path = 
value.strip('"').split('"')[0].strip() - reg_total += 1 - reg_names.append(name) - - if _is_suspicious_path(exe_path): - reg_suspicious.append((name, value, label)) - i += 1 - except OSError: - break - winreg.CloseKey(key) - except (OSError, PermissionError): - pass - - # Report suspicious individually, summarize the rest - for name, value, label in reg_suspicious: - findings.append(Finding("startup", "CRITICAL", - f"Suspicious startup entry: {name}", - f"Location: {label}\nCommand: {value}\nPath is in a suspicious directory.", - "Investigate this entry. Remove if not recognized.")) - - safe_count = reg_total - len(reg_suspicious) - if safe_count > 0: - findings.append(Finding("startup", "INFO", - f"Registry startup entries: {safe_count} (all safe)", - "Programs: " + ", ".join(reg_names[:15]) + - (f" (+{len(reg_names)-15} more)" if len(reg_names) > 15 else ""))) - - # Startup folders — summarize - startup_dirs = [] - appdata = os.environ.get("APPDATA", "") - if appdata: - startup_dirs.append(os.path.join(appdata, - r"Microsoft\Windows\Start Menu\Programs\Startup")) - startup_dirs.append( - r"C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Startup") - - exec_exts = {".exe", ".bat", ".cmd", ".vbs", ".ps1", ".scr", ".pif", ".com", ".js", ".wsh"} - folder_items = [] - for d in startup_dirs: - if not os.path.isdir(d): - continue - try: - for f in os.listdir(d): - fp = os.path.join(d, f) - ext = os.path.splitext(f)[1].lower() - if ext in exec_exts or ext == ".lnk": - age = _age_days(fp) - if age < 7 and ext in exec_exts: - findings.append(Finding("startup", "CRITICAL", - f"Recently added startup executable: {f}", - f"Path: {fp}\nAge: {age:.0f} days", - "Review this file. 
Remove if not recognized.")) - else: - folder_items.append(f) - except PermissionError: - pass - - if folder_items: - findings.append(Finding("startup", "INFO", - f"Startup folder: {len(folder_items)} items", - "Items: " + ", ".join(folder_items[:10]))) - - # Scheduled Tasks — fix CSV parsing, summarize normal tasks - suspicious_tasks = [] - normal_task_count = 0 - try: - rc, out, _ = safe_run(["schtasks", "/query", "/fo", "CSV", "/v"], timeout=20) - if rc == 0 and out.strip(): - # Fix CSV parsing — skip lines that don't look like data - reader = csv.DictReader(io.StringIO(out)) - seen_tasks = set() - for row in reader: - try: - task_name = row.get("TaskName", "").strip() - task_run = row.get("Task To Run", "").strip() - author = row.get("Author", "").strip() - - # Skip empty, header artifacts, and Microsoft tasks - if not task_name or task_name == "TaskName": - continue - if "\\Microsoft\\" in task_name: - continue - # Deduplicate (same task appears multiple times for different triggers) - if task_name in seen_tasks: - continue - seen_tasks.add(task_name) - - if _is_suspicious_path(task_run): - suspicious_tasks.append((task_name, task_run, author)) - else: - normal_task_count += 1 - except Exception: - continue - except Exception: - findings.append(Finding("startup", "INFO", "Could not enumerate scheduled tasks", - "schtasks command failed")) - - for task_name, task_run, author in suspicious_tasks: - findings.append(Finding("startup", "CRITICAL", - f"Suspicious scheduled task: {task_name}", - f"Command: {task_run}\nAuthor: {author}", - "Investigate this scheduled task. Delete if not recognized.")) - - if normal_task_count > 0: - findings.append(Finding("startup", "INFO", - f"Scheduled tasks: {normal_task_count} non-Microsoft tasks (all safe paths)", - "All scheduled task executables are in expected locations.")) - - return findings - - # ────────── 2. 
Process Analysis ────────── - - def check_processes(self) -> List[Finding]: - findings = [] - if not psutil: - findings.append(Finding("processes", "INFO", "psutil not available")) - return findings - - checked_exes = set() - suspicious_count = 0 - - for proc in psutil.process_iter(['pid', 'name', 'exe', 'username', 'create_time']): - try: - info = proc.info - name = (info.get('name') or '').lower() - exe = info.get('exe') or '' - username = info.get('username') or '' - pid = info.get('pid', 0) - - if not exe or pid <= 4: - continue - - # Check for system process name spoofing - if name in SYSTEM_PROCESS_PATHS: - expected = SYSTEM_PROCESS_PATHS[name] - actual = exe.lower().replace("/", "\\") - if actual != expected and not actual.endswith("\\" + name): - findings.append(Finding("processes", "CRITICAL", - f"⚠️ FAKE SYSTEM PROCESS: {name} (PID {pid})", - f"Expected: {expected}\nActual: {exe}\nUser: {username}\n" - "This process is impersonating a Windows system process!", - "IMMEDIATELY investigate. 
This is a strong indicator of malware.")) - continue - - # Check for processes in suspicious locations - if _is_suspicious_path(exe): - suspicious_count += 1 - if suspicious_count <= 30: # cap findings - findings.append(Finding("processes", "WARN", - f"Process from suspicious path: {info.get('name', name)}", - f"PID: {pid}\nPath: {exe}\nUser: {username}", - "Investigate if this process is legitimate.")) - - # Check digital signature for suspicious processes (rate limited) - if _is_suspicious_path(exe) and exe not in checked_exes and len(checked_exes) < 10: - checked_exes.add(exe) - try: - rc, out, _ = safe_run([ - "powershell", "-NoProfile", "-Command", - f"(Get-AuthenticodeSignature -FilePath '{exe}').Status" - ], timeout=5) - if rc == 0 and "NotSigned" in out: - findings.append(Finding("processes", "WARN", - f"Unsigned executable: {os.path.basename(exe)}", - f"Path: {exe}\nDigital signature: Not signed", - "Unsigned executables from unusual locations should be investigated.")) - except Exception: - pass - - except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): - continue - - if suspicious_count == 0: - findings.append(Finding("processes", "INFO", - "All processes running from expected locations", - "No processes found running from suspicious directories.")) - - return findings - - # ────────── 3. 
Ports & Firewall ────────── - - def check_ports_firewall(self) -> List[Finding]: - findings = [] - if not psutil: - findings.append(Finding("ports", "INFO", "psutil not available")) - return findings - - # Listening ports — deduplicate by port+process (IPv4/IPv6 share same port) - listeners = [] - seen_ports = {} # port -> (pname, pid, addresses) - try: - for conn in psutil.net_connections(kind='inet'): - if conn.status == 'LISTEN': - port = conn.laddr.port - addr = conn.laddr.ip - pid = conn.pid - pname = "" - try: - pname = psutil.Process(pid).name() if pid else "unknown" - except Exception: - pname = f"PID {pid}" - - listeners.append((port, addr, pname, pid)) - key = (port, pname) - if key not in seen_ports: - seen_ports[key] = {"pid": pid, "addrs": []} - seen_ports[key]["addrs"].append(addr) - except (PermissionError, psutil.AccessDenied): - findings.append(Finding("ports", "INFO", - "Limited port scan (no admin)", "Run as admin for full port enumeration.")) - - # Report each unique port+process once - reported_ports = set() - for (port, pname), info in seen_ports.items(): - if port in reported_ports: - continue - reported_ports.add(port) - addrs = ", ".join(info["addrs"]) - - if port in BACKDOOR_PORTS: - findings.append(Finding("ports", "CRITICAL", - f"⚠️ BACKDOOR PORT OPEN: {port}", - f"Process: {pname} (PID {info['pid']})\nListening on: {addrs}\n" - "This port is commonly used by backdoors and remote access trojans!", - "Investigate immediately. Kill the process and check its executable.")) - elif port in SUSPICIOUS_LISTEN_PORTS: - desc = SUSPICIOUS_LISTEN_PORTS[port] - findings.append(Finding("ports", "WARN", - f"Sensitive port: {port} ({desc}) — {pname}", - f"PID: {info['pid']}\nAddresses: {addrs}\n" - f"This is a standard Windows service. 
Expected on most PCs.", - f"Only concerning if you don't expect {desc} on this machine.")) - - normal_ports = len(seen_ports) - len([p for p in reported_ports - if p in BACKDOOR_PORTS or p in SUSPICIOUS_LISTEN_PORTS]) - if normal_ports > 0: - findings.append(Finding("ports", "INFO", - f"Open ports: {len(seen_ports)} unique ({normal_ports} normal, " - f"{len(reported_ports & set(SUSPICIOUS_LISTEN_PORTS.keys()))} system services)", - "No backdoor ports detected.")) - - # RDP check - if winreg: - try: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Control\Terminal Server", 0, winreg.KEY_READ) - deny, _ = winreg.QueryValueEx(key, "fDenyTSConnections") - winreg.CloseKey(key) - if deny == 0: - findings.append(Finding("ports", "WARN", - "Remote Desktop (RDP) is ENABLED", - "RDP allows remote access to this computer.\n" - "If you don't use RDP, this is a security risk.", - "Disable RDP: Settings → System → Remote Desktop → Off")) - except Exception: - pass - - # Firewall rules (inbound allow from any) - try: - rc, out, _ = safe_run(["netsh", "advfirewall", "firewall", "show", "rule", - "name=all", "dir=in"], timeout=15) - if rc == 0: - suspicious_rules = [] - current_rule = {} - for line in out.splitlines(): - line = line.strip() - if line.startswith("Rule Name:"): - if current_rule: - # Check previous rule - if (current_rule.get("action") == "Allow" and - current_rule.get("remote") in ("Any", "*") and - current_rule.get("enabled") == "Yes"): - name = current_rule.get("name", "") - if not any(k in name.lower() for k in ["core networking", "windows", - "microsoft", "wsl", "hyper-v", "delivery optimization"]): - suspicious_rules.append(name) - current_rule = {"name": line.split(":", 1)[1].strip()} - elif line.startswith("Enabled:"): - current_rule["enabled"] = line.split(":", 1)[1].strip() - elif line.startswith("Action:"): - current_rule["action"] = line.split(":", 1)[1].strip() - elif line.startswith("RemoteIP:"): - current_rule["remote"] = 
line.split(":", 1)[1].strip() - - # Deduplicate rules by name (same app often has IPv4+IPv6 rules) - from collections import Counter - rule_counts = Counter(suspicious_rules) - if rule_counts: - # Only flag as WARN if few rules, INFO if many (normal for dev machines) - sev = "INFO" if len(rule_counts) > 20 else "WARN" - top_apps = "\n".join(f" • {name} ({count} rules)" - for name, count in rule_counts.most_common(10)) - findings.append(Finding("ports", sev, - f"Firewall: {len(rule_counts)} apps with inbound allow-any rules", - f"Top apps accepting inbound connections:\n{top_apps}" + - (f"\n ... and {len(rule_counts)-10} more" if len(rule_counts) > 10 else ""), - "Normal for dev/gaming PCs. Review unused apps in Windows Firewall.")) - except Exception: - pass - - return findings - - # ────────── 4. File System ────────── - - def check_filesystem(self) -> List[Finding]: - findings = [] - - exec_exts = {".exe", ".dll", ".scr", ".bat", ".cmd", ".ps1", ".vbs", ".js", ".wsh", ".pif", ".com"} - # Double extension pattern — but exclude known safe patterns like VC_redist.x86.exe - double_ext_pat = re.compile(r'\.\w{2,5}\.(exe|scr|bat|cmd|pif|com|vbs|js|wsh|ps1)$', re.I) - safe_double_ext = re.compile(r'\.(x86|x64|arm64|win32|win64|setup|install|update|patch|redist)\.(exe|msi)$', re.I) - - # Scan suspicious directories for recent executables - scan_dirs = [] - for env in ["TEMP", "TMP"]: - d = os.environ.get(env) - if d and os.path.isdir(d): - scan_dirs.append(d) - localtemp = os.path.join(os.environ.get("LOCALAPPDATA", ""), "Temp") - if os.path.isdir(localtemp) and localtemp not in scan_dirs: - scan_dirs.append(localtemp) - for d in [os.environ.get("PROGRAMDATA", r"C:\ProgramData")]: - if d and os.path.isdir(d): - scan_dirs.append(d) - - recent_exes = [] - for base_dir in scan_dirs: - try: - for root, dirs, files in os.walk(base_dir): - # Max depth 3 - depth = root.replace(base_dir, "").count(os.sep) - if depth > 3: - dirs.clear() - continue - for f in files: - try: - ext = 
os.path.splitext(f)[1].lower() - except Exception: - continue - if ext in exec_exts: - try: - fp = os.path.join(root, f) - # Ensure safe string representation - fp_safe = fp.encode("utf-8", errors="replace").decode("utf-8") - f_safe = f.encode("utf-8", errors="replace").decode("utf-8") - except Exception: - continue - age = _age_days(fp) - if age < 7: - recent_exes.append((fp_safe, age, f_safe)) - # Double extension check (skip known safe patterns) - if double_ext_pat.search(f) and not safe_double_ext.search(f): - findings.append(Finding("filesystem", "CRITICAL", - f"⚠️ DOUBLE EXTENSION: {f_safe}", - f"Path: {fp_safe}\nThis file has a deceptive double extension, " - "commonly used by malware to disguise executables as documents.", - "Delete this file immediately unless you are certain it's safe.")) - except PermissionError: - continue - - # Deduplicate by filename and filter known safe patterns - safe_temp_patterns = [ - "mcp_dynamic_", # Claude Code MCP server DLLs - "vscode-", # VS Code extensions - "ngen_service", # .NET Native Image Generator - "dotnet-", # .NET SDK - "msbuild", # Visual Studio build - ] - seen_names = set() - deduped = [] - safe_skipped = 0 - for fp, age, fname in recent_exes: - if fname.lower() in seen_names: - continue - seen_names.add(fname.lower()) - if any(p in fname.lower() for p in safe_temp_patterns): - safe_skipped += 1 - continue - deduped.append((fp, age, fname)) - - for fp, age, fname in deduped[:15]: - findings.append(Finding("filesystem", "WARN", - f"Recent executable: {fname}", - f"Path: {fp}\nAge: {age:.1f} days\nFound in temporary/data directory.", - "Investigate if this file is legitimate.")) - - if safe_skipped: - findings.append(Finding("filesystem", "INFO", - f"Skipped {safe_skipped} known-safe temp files", - "Claude MCP DLLs, VS Code extensions, and .NET build files are expected.")) - - if not recent_exes: - findings.append(Finding("filesystem", "INFO", - "No recent executables in temp directories", - "No suspicious 
executable files found in Temp/AppData/ProgramData.")) - - # Hosts file check - hosts_path = r"C:\Windows\System32\drivers\etc\hosts" - try: - with open(hosts_path, "r", errors="replace") as f: - hosts_content = f.read() - custom_entries = [] - for line in hosts_content.splitlines(): - line = line.strip() - if not line or line.startswith("#"): - continue - if "localhost" in line.lower() and ("127.0.0.1" in line or "::1" in line): - continue - custom_entries.append(line) - - if custom_entries: - # Classify entries: blocking (127.0.0.1/0.0.0.0 → block) vs redirecting - blocking = [e for e in custom_entries if e.startswith("127.0.0.1") or e.startswith("0.0.0.0")] - redirecting = [e for e in custom_entries if e not in blocking] - - # Check for hijacking of major domains (only in redirects, not blocks) - hijacked = [e for e in redirecting if any( - d in e.lower() for d in ["google", "microsoft", "facebook", "apple", - "amazon", "bank", "paypal", "login"])] - if hijacked: - findings.append(Finding("filesystem", "CRITICAL", - "⚠️ HOSTS FILE HIJACKING DETECTED", - f"Major domains redirected to unknown IPs:\n" + - "\n".join(hijacked[:10]), - "Your hosts file has been tampered with! Remove suspicious entries.")) - elif blocking and not redirecting: - # All entries are 127.0.0.1 blocks — this is ad-blocking or license enforcement - findings.append(Finding("filesystem", "INFO", - f"Hosts file: {len(blocking)} blocked domains", - "Blocked domains (127.0.0.1):\n" + - "\n".join(f" • {e.split(None, 1)[1] if len(e.split(None, 1)) > 1 else e}" for e in blocking[:10]) + - "\n\nThese are intentional blocks (ad-blocking, license enforcement, etc). 
Normal.")) - else: - findings.append(Finding("filesystem", "WARN", - f"Custom hosts file entries ({len(custom_entries)})", - "Entries:\n" + "\n".join(custom_entries[:10]), - "Review these entries.")) - else: - findings.append(Finding("filesystem", "INFO", - "Hosts file clean", "No custom entries in hosts file.")) - except PermissionError: - findings.append(Finding("filesystem", "INFO", - "Cannot read hosts file", "Requires admin privileges.")) - - return findings - - # ────────── 5. DNS & Network ────────── - - def check_dns_network(self) -> List[Finding]: - findings = [] - - # Get current DNS servers - rc, out, _ = safe_run(["ipconfig", "/all"], timeout=10) - dns_servers = [] - if rc == 0: - in_dns = False - for line in out.splitlines(): - stripped = line.strip() - if "dns servers" in stripped.lower() or "dns server" in stripped.lower(): - in_dns = True - parts = stripped.split(":", 1) - if len(parts) > 1: - ip = parts[1].strip() - if re.match(r'\d+\.\d+\.\d+\.\d+', ip): - dns_servers.append(ip) - elif in_dns and re.match(r'^\s+\d+\.\d+\.\d+\.\d+', line): - dns_servers.append(line.strip()) - else: - in_dns = False - - for dns in set(dns_servers): - if dns.startswith("127.") or dns.startswith("192.168.") or dns.startswith("10."): - findings.append(Finding("dns", "INFO", - f"DNS server: {dns} (local/router)", "Using local DNS resolver.")) - elif dns in KNOWN_DNS: - findings.append(Finding("dns", "INFO", - f"DNS server: {dns} (known public DNS)", "Recognized public DNS provider.")) - else: - findings.append(Finding("dns", "WARN", - f"Unknown DNS server: {dns}", - "This DNS server is not a recognized public provider.\n" - "Could be your ISP's DNS or potentially hijacked.", - "Consider switching to a known DNS (8.8.8.8, 1.1.1.1, 9.9.9.9).")) - - # Proxy settings - if winreg: - try: - key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", - 0, winreg.KEY_READ) - proxy_enable, _ = winreg.QueryValueEx(key, 
"ProxyEnable") - if proxy_enable: - proxy_server, _ = winreg.QueryValueEx(key, "ProxyServer") - findings.append(Finding("dns", "WARN", - f"System proxy ENABLED: {proxy_server}", - "A system-wide proxy is configured. All browser traffic goes through it.\n" - "If you didn't set this, it could be malware redirecting your traffic.", - "Check proxy settings: Settings → Network → Proxy")) - else: - findings.append(Finding("dns", "INFO", - "No system proxy configured", "ProxyEnable = 0 (good)")) - winreg.CloseKey(key) - except Exception: - pass - - # DNS resolution verification - test_domains = [ - ("www.google.com", False), - ("www.microsoft.com", False), - ("login.microsoftonline.com", False), - ] - for domain, _ in test_domains: - try: - results = socket.getaddrinfo(domain, 443, socket.AF_INET) - ips = list(set(r[4][0] for r in results)) - # Check if resolved to private IPs (hijacking indicator) - private = [ip for ip in ips if ip.startswith("10.") or ip.startswith("192.168.") - or ip.startswith("172.") or ip.startswith("127.")] - if private: - findings.append(Finding("dns", "CRITICAL", - f"⚠️ DNS HIJACKING: {domain} → {private[0]}", - f"Domain {domain} resolved to a private IP address!\n" - "This strongly suggests DNS hijacking.", - "Change your DNS servers immediately! Use 8.8.8.8 or 1.1.1.1.")) - except socket.gaierror: - findings.append(Finding("dns", "WARN", - f"DNS resolution failed: {domain}", - "Could not resolve this domain. Possible DNS issue or blocking.", - "Check your DNS settings and internet connection.")) - - return findings - - # ────────── 6. 
Account Security ────────── - - def check_accounts(self) -> List[Finding]: - findings = [] - - # List local accounts - rc, out, _ = safe_run(["net", "user"], timeout=10) - if rc == 0: - accounts = [] - capture = False - for line in out.splitlines(): - if "---" in line: - capture = True - continue - if capture and line.strip() and "command completed" not in line.lower(): - accounts.extend(line.split()) - - for acct in accounts: - rc2, detail, _ = safe_run(["net", "user", acct], timeout=5) - if rc2 != 0: - continue - - is_active = "Yes" in [l.split("active")[-1].strip() - for l in detail.lower().splitlines() - if "account active" in l] if detail else False - is_admin_acct = False - if "*administrators*" in detail.lower().replace(" ", ""): - is_admin_acct = True - - # Check for active status more robustly - for line in detail.splitlines(): - if "Account active" in line and "Yes" in line: - is_active = True - if "Local Group Memberships" in line and "*Administrators*" in line: - is_admin_acct = True - - if acct.lower() == "guest": - if is_active: - findings.append(Finding("accounts", "WARN", - "Guest account is ENABLED", - "The Guest account allows anonymous access to this computer.", - "Disable it: net user Guest /active:no")) - continue - - if acct.lower() == "administrator": - if is_active: - findings.append(Finding("accounts", "WARN", - "Built-in Administrator account is ENABLED", - "The default Administrator account is a common attack target.", - "Disable it if not needed: net user Administrator /active:no")) - continue - - if is_admin_acct: - findings.append(Finding("accounts", "INFO", - f"Admin account: {acct}", - f"Active: {is_active}\nHas administrator privileges.")) - - # Failed login attempts - rc, out, _ = safe_run([ - "wevtutil", "qe", "Security", - "/q:*[System[EventID=4625]]", - "/c:50", "/f:text", "/rd:true" - ], timeout=10) - if rc == 0 and out.strip(): - fail_count = out.count("Event[") - if not fail_count: - fail_count = out.count("EventID") - if 
fail_count > 20: - findings.append(Finding("accounts", "CRITICAL", - f"⚠️ {fail_count}+ failed login attempts detected!", - "High number of failed logins may indicate a brute-force attack.", - "Check Event Viewer → Security for details. Consider enabling account lockout policy.")) - elif fail_count > 5: - findings.append(Finding("accounts", "WARN", - f"{fail_count} failed login attempts", - "Some failed login attempts detected. Could be normal typos or suspicious activity.", - "Review in Event Viewer → Security log.")) - else: - findings.append(Finding("accounts", "INFO", - "Few/no failed login attempts", "Login security looks normal.")) - elif "Access is denied" in (out + _): - findings.append(Finding("accounts", "INFO", - "Cannot read Security event log", - "Admin privileges required to check failed login attempts.", - "Run as admin for full account security analysis.")) - - return findings - - # ────────── 7. Wi-Fi Security ────────── - - def check_wifi(self) -> List[Finding]: - findings = [] - - # Saved profiles - rc, out, _ = safe_run(["netsh", "wlan", "show", "profiles"], timeout=10) - if rc != 0: - findings.append(Finding("wifi", "INFO", "Cannot enumerate Wi-Fi profiles", - "Wi-Fi interface may not be available.")) - return findings - - profiles = re.findall(r"All User Profile\s*:\s*(.+)", out) - if not profiles: - profiles = re.findall(r"Profile\s*:\s*(.+)", out) - - weak_count = 0 - for profile in profiles: - profile = profile.strip() - rc2, detail, _ = safe_run(["netsh", "wlan", "show", "profile", - f"name={profile}"], timeout=5) - if rc2 != 0: - continue - - auth = "" - cipher = "" - for line in detail.splitlines(): - if "Authentication" in line: - auth = line.split(":", 1)[1].strip() if ":" in line else "" - if "Cipher" in line: - cipher = line.split(":", 1)[1].strip() if ":" in line else "" - - if auth.lower() in ("open", ""): - findings.append(Finding("wifi", "CRITICAL", - f"⚠️ OPEN Wi-Fi saved: {profile}", - f"Authentication: {auth}\nNo encryption — 
all traffic visible to anyone nearby!", - "Remove this profile unless absolutely necessary.")) - weak_count += 1 - elif "wep" in auth.lower(): - findings.append(Finding("wifi", "CRITICAL", - f"⚠️ WEP Wi-Fi saved: {profile}", - f"Authentication: {auth}\nWEP encryption is broken and easily cracked.", - "Remove this profile. Use WPA2 or WPA3 networks only.")) - weak_count += 1 - elif "wpa2" in auth.lower() or "wpa3" in auth.lower(): - findings.append(Finding("wifi", "INFO", - f"Wi-Fi profile: {profile} ({auth})", - f"Cipher: {cipher}")) - - # Current connection - rc, out, _ = safe_run(["netsh", "wlan", "show", "interfaces"], timeout=5) - if rc == 0: - ssid = bssid = auth = signal = channel = "" - for line in out.splitlines(): - l = line.strip() - if l.startswith("SSID") and "BSSID" not in l: - ssid = l.split(":", 1)[1].strip() if ":" in l else "" - elif l.startswith("BSSID"): - bssid = l.split(":", 1)[1].strip() if ":" in l else "" - elif l.startswith("Authentication"): - auth = l.split(":", 1)[1].strip() if ":" in l else "" - elif l.startswith("Signal"): - signal = l.split(":", 1)[1].strip() if ":" in l else "" - elif l.startswith("Channel"): - channel = l.split(":", 1)[1].strip() if ":" in l else "" - - if ssid: - findings.append(Finding("wifi", "INFO", - f"Connected: {ssid} ({auth})", - f"BSSID: {bssid}\nSignal: {signal}\nChannel: {channel}")) - - # Evil twin check — compare BSSID with baseline - bl = self.state.get("baseline") - if bl: - bl_findings = bl.get("findings", []) - for bf in bl_findings: - if bf.get("category") == "wifi" and "Connected:" in bf.get("title", ""): - old_bssid = "" - for dl in bf.get("detail", "").splitlines(): - if dl.startswith("BSSID:"): - old_bssid = dl.split(":", 1)[1].strip() - if old_bssid and bssid and old_bssid != bssid: - findings.append(Finding("wifi", "CRITICAL", - f"⚠️ POSSIBLE EVIL TWIN: {ssid}", - f"BSSID changed!\nBaseline: {old_bssid}\nCurrent: {bssid}\n" - "Same network name but different access point!", - "Disconnect 
immediately if unexpected. Verify with your router admin.")) - - if weak_count == 0 and not any(f.severity == "CRITICAL" for f in findings): - findings.append(Finding("wifi", "INFO", - "Wi-Fi security OK", f"{len(profiles)} saved profiles, all use strong encryption.")) - - return findings - - # ────────── 8. USB & Hardware ────────── - - def check_usb_hardware(self) -> List[Finding]: - findings = [] - - # USB device history from registry - if winreg: - try: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Enum\USBSTOR", 0, winreg.KEY_READ) - i = 0 - devices = [] - while True: - try: - subkey_name = winreg.EnumKey(key, i) - # Each subkey is like "Disk&Ven_SanDisk&Prod_Ultra&Rev_1.00" - try: - subkey = winreg.OpenKey(key, subkey_name, 0, winreg.KEY_READ) - j = 0 - while True: - try: - serial = winreg.EnumKey(subkey, j) - device_name = subkey_name.replace("&", " ").replace("_", " ") - devices.append((device_name, serial)) - - # Check for hacking tool keywords - combined = (device_name + " " + serial).lower() - if any(kw in combined for kw in HACKING_USB_KEYWORDS): - findings.append(Finding("usb", "CRITICAL", - f"⚠️ HACKING TOOL USB DETECTED: {device_name}", - f"Serial: {serial}\nThis device matches known hacking tool signatures!", - "Investigate immediately! 
Remove and scan your system.")) - j += 1 - except OSError: - break - winreg.CloseKey(subkey) - except OSError: - pass - i += 1 - except OSError: - break - winreg.CloseKey(key) - - findings.append(Finding("usb", "INFO", - f"{len(devices)} USB storage devices in history", - "Devices:\n" + "\n".join(f" • {d[0]}" for d in devices[:15]))) - - except (OSError, PermissionError): - findings.append(Finding("usb", "INFO", - "Cannot read USB history", "Registry access denied.")) - - # Network adapter analysis — classify as physical/virtual - rc, out, _ = safe_run(["ipconfig", "/all"], timeout=10) - if rc == 0: - # Match only actual adapter headers (line starts with adapter type) - adapters = [] - for line in out.splitlines(): - m = re.match(r'^(Ethernet|Wireless LAN|Wi-Fi|PPP)\s+adapter\s+(.+?):', line, re.I) - if m: - adapters.append(m.group(2).strip()) - virtual_keywords = ["virtual", "hyper-v", "vmware", "vbox", "loopback", - "local area connection*", "bluetooth", "vpn", "tunnel", - "wsl", "docker", "vethernet"] - physical = [] - virtual = [] - for a in adapters: - if any(k in a.lower() for k in virtual_keywords): - virtual.append(a) - else: - physical.append(a) - - detail_lines = [] - if physical: - detail_lines.append("Physical: " + ", ".join(physical)) - if virtual: - detail_lines.append("Virtual/System: " + ", ".join(virtual)) - - # Only warn about unexpected PHYSICAL adapters - unexpected_physical = [a for a in physical if not any( - k in a.lower() for k in ["ethernet", "wi-fi", "wifi", "wireless"])] - if unexpected_physical: - findings.append(Finding("usb", "WARN", - f"Unexpected physical adapter: {', '.join(unexpected_physical)}", - "\n".join(detail_lines) + - "\nUnexpected physical adapters could be rogue USB network devices.", - "Check Device Manager for unknown network adapters.")) - else: - findings.append(Finding("usb", "INFO", - f"Network adapters: {len(physical)} physical, {len(virtual)} virtual", - "\n".join(detail_lines))) - - # Bluetooth devices - if 
winreg: - try: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Services\BTHPORT\Parameters\Devices", - 0, winreg.KEY_READ) - i = 0 - bt_count = 0 - while True: - try: - winreg.EnumKey(key, i) - bt_count += 1 - i += 1 - except OSError: - break - winreg.CloseKey(key) - findings.append(Finding("usb", "INFO", - f"{bt_count} Bluetooth devices paired", - "Review paired Bluetooth devices in Settings → Bluetooth.")) - except (OSError, PermissionError): - pass - - return findings - - # ────────── 9. Browser Security ────────── - - def check_browser(self) -> List[Finding]: - findings = [] - - # Root certificate store — get subject + issuer + notafter for full analysis - try: - rc, out, _ = safe_run([ - "powershell", "-NoProfile", "-Command", - "Get-ChildItem Cert:\\LocalMachine\\Root | " - "Select-Object Subject, NotAfter, Thumbprint | " - "ForEach-Object { $_.Subject + '|||' + $_.NotAfter.ToString('yyyy-MM-dd') + '|||' + $_.Thumbprint }" - ], timeout=15) - if rc == 0 and out.strip(): - known_certs = [] - unknown_certs = [] - expired_certs = [] - - for line in out.strip().splitlines(): - line = line.strip() - if not line: - continue - parts = line.split("|||") - subject = parts[0].strip() if len(parts) > 0 else "" - notafter = parts[1].strip() if len(parts) > 1 else "" - thumb = parts[2].strip()[:12] if len(parts) > 2 else "" - - if not subject: - continue - - # Check expiration - try: - exp_date = datetime.strptime(notafter, "%Y-%m-%d") - if exp_date < datetime.now(): - expired_certs.append(f"{subject[:70]} (expired {notafter})") - except Exception: - pass - - cert_lower = subject.lower() - if any(kw in cert_lower for kw in KNOWN_ROOT_CA_KEYWORDS): - known_certs.append(subject) - else: - unknown_certs.append(subject) - - # Report - findings.append(Finding("browser", "INFO", - f"Certificate store: {len(known_certs)} known CAs", - f"All recognized root certificates from trusted providers.")) - - if expired_certs: - 
findings.append(Finding("browser", "INFO", - f"{len(expired_certs)} expired root certificates (normal)", - "Expired CAs:\n" + "\n".join(f" - {c}" for c in expired_certs[:5]) + - ("\n ..." if len(expired_certs) > 5 else "") + - "\n\nExpired root CAs are normal Windows leftovers. Not a security risk.")) - - if unknown_certs: - # Check if any look truly suspicious (self-signed, random names) - suspicious_ca = [c for c in unknown_certs if any( - k in c.lower() for k in ["proxy", "intercept", "mitm", "debug", - "fiddler", "charles", "burp", "mitmproxy"])] - if suspicious_ca: - findings.append(Finding("browser", "CRITICAL", - f"MITM/proxy certificate detected!", - "Suspicious root CAs:\n" + "\n".join(f" - {c[:80]}" for c in suspicious_ca) + - "\n\nThese certificates are used by traffic interception tools!", - "Remove these in certmgr.msc unless you installed them intentionally.")) - elif unknown_certs: - findings.append(Finding("browser", "INFO", - f"{len(unknown_certs)} other root certificates", - "Non-standard but not suspicious CAs:\n" + - "\n".join(f" - {c[:80]}" for c in unknown_certs[:10]) + - "\n\nThese are likely from software vendors or regional CAs.", - "Review in certmgr.msc if concerned.")) - else: - findings.append(Finding("browser", "INFO", - "No unknown root certificates", "All CAs are recognized.")) - except Exception: - findings.append(Finding("browser", "INFO", - "Could not check certificate store", "PowerShell command failed.")) - - # System proxy (already checked in DNS, but reference here) - if winreg: - try: - key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", - 0, winreg.KEY_READ) - try: - auto_config, _ = winreg.QueryValueEx(key, "AutoConfigURL") - if auto_config: - findings.append(Finding("browser", "WARN", - f"Auto-config proxy (PAC): {auto_config}", - "A proxy auto-configuration URL is set. 
This could redirect your traffic.", - "Check: Settings → Network → Proxy → Automatic proxy setup")) - except FileNotFoundError: - pass - winreg.CloseKey(key) - except Exception: - pass - - if not findings: - findings.append(Finding("browser", "INFO", - "Browser security checks passed", "No suspicious proxy or certificate issues found.")) - - return findings - - # ────────── 10. Event Logs ────────── - - def check_event_logs(self) -> List[Finding]: - findings = [] - - # (log, event_id, description, area, warn_threshold, critical_threshold) - # Note: wevtutil /c:100 fetches last 100 events — for service installs, - # 100 over the lifetime of a PC is completely normal (Windows updates alone install many) - events_to_check = [ - ("Security", "4625", "Failed login attempts", "accounts", 10, 50), - ("Security", "4720", "User account created", "accounts", 1, 3), - ("System", "7045", "Service installed", "system", 100, 100), # 100 = max we fetch, always INFO - ("Security", "1102", "Audit log cleared", "security", 1, 1), - ("Security", "4719", "Security policy changed", "security", 5, 20), - ] - - admin = is_admin() - - for log, event_id, desc, area, warn_thresh, crit_thresh in events_to_check: - if log == "Security" and not admin: - continue # Security log requires admin - - try: - rc, out, err = safe_run([ - "wevtutil", "qe", log, - f"/q:*[System[EventID={event_id}]]", - "/c:100", "/f:text", "/rd:true" - ], timeout=10) - - if rc != 0: - if "Access is denied" in err or "Access is denied" in out: - continue - continue - - count = out.count(" 0: - findings.append(Finding("eventlogs", "CRITICAL", - f"⚠️ AUDIT LOG CLEARED ({count} times)!", - "Someone cleared the Windows Security audit log.\n" - "This is a common technique used by attackers to cover their tracks!", - "Investigate immediately. 
Check who has admin access to this machine.")) - elif event_id == "4720" and count > 0: - findings.append(Finding("eventlogs", "WARN", - f"User accounts created: {count}", - f"{count} user account creation events found in Security log.\n" - "Verify all accounts were created intentionally.", - "Run 'net user' to review all accounts.")) - elif event_id == "7045": - # Service installs are normal — Windows updates, software installs all create services - findings.append(Finding("eventlogs", "INFO", - f"Service installations: {count} in recent history", - f"This is normal for a Windows PC with regular software/updates.\n" - "Only concerning if you see services you don't recognize.", - "Review in services.msc if needed.")) - elif count > crit_thresh: - findings.append(Finding("eventlogs", "CRITICAL", - f"{desc}: {count} events", - f"High number of {desc.lower()} events detected.")) - elif count > warn_thresh: - findings.append(Finding("eventlogs", "WARN", - f"{desc}: {count} events", - f"{count} {desc.lower()} events found.")) - - except Exception: - continue - - if not admin: - findings.append(Finding("eventlogs", "INFO", - "Limited event log analysis (no admin)", - "Security event log requires admin privileges.\nRun as admin for: failed logins, " - "account creation, audit log clearing.", - "Right-click → Run as administrator for full analysis.")) - - if not findings: - findings.append(Finding("eventlogs", "INFO", - "Event logs look clean", "No suspicious events found.")) - - return findings - - -# ─────────────────────────── UI ─────────────────────────── - -class App(ctk.CTkFrame if HAS_CTK else tk.Frame): - def __init__(self, parent): - super().__init__(parent) - self.parent = parent - parent.title("Security Audit") - parent.geometry("1200x750") - parent.minsize(850, 550) - - state_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - "security_audit_state.json") - self.engine = SecurityAuditEngine(state_path) - - self.running = True - self.scanning = 
False - self.result_q: "queue.Queue" = queue.Queue() - self.all_findings: List[Finding] = [] - self.scan_progress = 0 - self.filter_cat = "ALL" - self.filter_sev = {"INFO": True, "WARN": True, "CRITICAL": True} - - self._build_ui() - self.pack(fill="both", expand=True) - self.after(200, self._ui_tick) - - def _build_ui(self): - # ── Top bar ── - top = ctk.CTkFrame(self) - top.pack(fill="x", padx=8, pady=(6, 2)) - - # Admin badge - admin = is_admin() - badge_color = "#28a745" if admin else "#ffc107" - badge_text = "ADMIN" if admin else "USER MODE" - ctk.CTkLabel(top, text=f" {badge_text} ", font=("Segoe UI", 10, "bold"), - fg_color=badge_color, text_color="#000000", corner_radius=4).pack(side="left", padx=(4, 8)) - - self.btn_scan = ctk.CTkButton(top, text="▶ Run Full Audit", fg_color="#28a745", - hover_color="#218838", width=140, command=self.start_scan) - self.btn_scan.pack(side="left", padx=4) - - ctk.CTkButton(top, text="Save Baseline", width=100, - command=self.save_baseline).pack(side="left", padx=4) - ctk.CTkButton(top, text="Export", width=70, - command=self.export_report).pack(side="left", padx=4) - - # Progress bar & status - self.progress_bar = ctk.CTkProgressBar(top, width=200) - self.progress_bar.pack(side="left", padx=(12, 4)) - self.progress_bar.set(0) - - self.status_label = ctk.CTkLabel(top, text="Ready — click Run Full Audit", - font=("Segoe UI", 10)) - self.status_label.pack(side="left", padx=8) - - # ── Tabs ── - nb = ctk.CTkTabview(self) - nb.pack(fill="both", expand=True, padx=8, pady=(2, 6)) - - self.tab_dashboard = nb.add("Dashboard") - self.tab_details = nb.add("Details") - self.tab_baseline = nb.add("Baseline") - - self._build_dashboard() - self._build_details() - self._build_baseline() - - # ── Dashboard ── - - def _build_dashboard(self): - f = self.tab_dashboard - scroll = ctk.CTkScrollableFrame(f) - scroll.pack(fill="both", expand=True, padx=4, pady=4) - - # Overall threat score - self.threat_frame = ctk.CTkFrame(scroll, 
fg_color="#1e1e1e", corner_radius=10) - self.threat_frame.pack(fill="x", padx=6, pady=(6, 10)) - self.threat_label = ctk.CTkLabel(self.threat_frame, text=" NOT SCANNED YET ", - font=("Segoe UI", 18, "bold"), - text_color="#888888") - self.threat_label.pack(pady=12) - - # Category cards grid (2 columns x 5 rows) - card_grid = ctk.CTkFrame(scroll, fg_color="transparent") - card_grid.pack(fill="both", expand=True, padx=4) - card_grid.columnconfigure(0, weight=1) - card_grid.columnconfigure(1, weight=1) - - self.dash_cards = {} - for idx, (name, key, icon) in enumerate(CATEGORIES): - row = idx // 2 - col = idx % 2 - - card = ctk.CTkFrame(card_grid, fg_color="#1e1e1e", corner_radius=8) - card.grid(row=row, column=col, padx=6, pady=4, sticky="nsew") - card_grid.rowconfigure(row, weight=1) - - header = ctk.CTkFrame(card, fg_color="transparent") - header.pack(fill="x", padx=10, pady=(8, 2)) - - ctk.CTkLabel(header, text=f"{icon} {name}", - font=("Segoe UI", 12, "bold")).pack(side="left") - - badge = ctk.CTkLabel(header, text=" PENDING ", - font=("Segoe UI", 9, "bold"), - fg_color="#555555", text_color="#cccccc", - corner_radius=4) - badge.pack(side="right") - - count_lbl = ctk.CTkLabel(card, text="Not scanned", - font=("Segoe UI", 10), text_color="#888888") - count_lbl.pack(anchor="w", padx=12, pady=(0, 8)) - - self.dash_cards[key] = {"badge": badge, "count": count_lbl, "card": card} - - # ── Details ── - - def _build_details(self): - f = self.tab_details - - # Top filter bar - filter_bar = ctk.CTkFrame(f) - filter_bar.pack(fill="x", padx=6, pady=(6, 2)) - - ctk.CTkLabel(filter_bar, text="Category:", font=("Segoe UI", 10)).pack(side="left", padx=(4, 4)) - - self.cat_var = tk.StringVar(value="ALL") - cat_menu = ctk.CTkOptionMenu(filter_bar, variable=self.cat_var, - values=["ALL"] + [c[0] for c in CATEGORIES], - width=180, command=lambda _: self._refresh_details()) - cat_menu.pack(side="left", padx=4) - - ctk.CTkLabel(filter_bar, text="Severity:", font=("Segoe UI", 
10)).pack(side="left", padx=(12, 4)) - - self.sev_vars = {} - for sev, color in [("CRITICAL", "#dc3545"), ("WARN", "#ffc107"), ("INFO", "#6c757d")]: - var = tk.BooleanVar(value=True) - self.sev_vars[sev] = var - ctk.CTkCheckBox(filter_bar, text=sev, variable=var, - text_color=color, font=("Segoe UI", 10), - command=self._refresh_details, width=80).pack(side="left", padx=2) - - # Finding count - self.finding_count_label = ctk.CTkLabel(filter_bar, text="", - font=("Segoe UI", 10), text_color="#888888") - self.finding_count_label.pack(side="right", padx=8) - - # Scrollable findings list - self.details_scroll = ctk.CTkScrollableFrame(f) - self.details_scroll.pack(fill="both", expand=True, padx=6, pady=4) - - self.details_placeholder = ctk.CTkLabel(self.details_scroll, - text="Run a scan to see findings here.", - font=("Segoe UI", 12), text_color="#666666") - self.details_placeholder.pack(pady=40) - - # ── Baseline ── - - def _build_baseline(self): - f = self.tab_baseline - scroll = ctk.CTkScrollableFrame(f) - scroll.pack(fill="both", expand=True, padx=6, pady=4) - - # Baseline info - self.bl_info_frame = ctk.CTkFrame(scroll, fg_color="#1e1e1e", corner_radius=8) - self.bl_info_frame.pack(fill="x", padx=6, pady=6) - - bl = self.engine.state.get("baseline") - if bl: - bl_text = f"Baseline saved: {bl.get('saved_at', 'unknown')}\nFindings: {len(bl.get('finding_keys', []))}" - else: - bl_text = "No baseline saved yet. Run a scan first, then click 'Save Baseline'." 
- - self.bl_info_label = ctk.CTkLabel(self.bl_info_frame, text=bl_text, - font=("Segoe UI", 11), justify="left") - self.bl_info_label.pack(padx=12, pady=10, anchor="w") - - btn_row = ctk.CTkFrame(scroll, fg_color="transparent") - btn_row.pack(fill="x", padx=6, pady=4) - ctk.CTkButton(btn_row, text="Save Current as Baseline", width=180, - command=self.save_baseline).pack(side="left", padx=4) - ctk.CTkButton(btn_row, text="Clear Baseline", width=120, - fg_color="#dc3545", hover_color="#c82333", - command=self.clear_baseline).pack(side="left", padx=4) - - # Diff view - ctk.CTkLabel(scroll, text="Changes Since Baseline", - font=("Segoe UI", 13, "bold")).pack(anchor="w", padx=8, pady=(12, 4)) - - self.diff_scroll = ctk.CTkFrame(scroll, fg_color="transparent") - self.diff_scroll.pack(fill="x", padx=6, pady=4) - - self.diff_placeholder = ctk.CTkLabel(self.diff_scroll, - text="Run a scan with a saved baseline to see changes.", - font=("Segoe UI", 10), text_color="#666666") - self.diff_placeholder.pack(pady=10) - - # ── Scanning ── - - def start_scan(self): - if self.scanning: - return - self.scanning = True - self.all_findings.clear() - self.scan_progress = 0 - self.progress_bar.set(0) - self.btn_scan.configure(state="disabled", text="Scanning...") - self.status_label.configure(text="Scanning...") - - # Reset dashboard cards - for key, card_data in self.dash_cards.items(): - card_data["badge"].configure(text=" SCANNING ", fg_color="#3a7ebf", text_color="#ffffff") - card_data["count"].configure(text="...") - - threading.Thread(target=self._scan_worker, daemon=True).start() - - def _scan_worker(self): - checks = self.engine.get_checks() - total = len(checks) - - with ThreadPoolExecutor(max_workers=4) as pool: - futures = {} - for cat_key, method in checks: - futures[pool.submit(self._safe_check, method)] = cat_key - - for future in as_completed(futures): - cat_key = futures[future] - try: - findings = future.result() - except Exception as e: - findings = [Finding(cat_key, 
"INFO", "Check failed", str(e))] - self.result_q.put(("category_done", cat_key, findings)) - - self.result_q.put(("scan_complete", None, None)) - - def _safe_check(self, method): - try: - return method() - except Exception as e: - return [Finding("unknown", "INFO", "Check error", str(e))] - - def _ui_tick(self): - if not self.running: - return - - changed = False - while True: - try: - msg_type, cat_key, data = self.result_q.get_nowait() - except queue.Empty: - break - - changed = True - if msg_type == "category_done": - self.all_findings.extend(data) - self.scan_progress += 1 - self.progress_bar.set(self.scan_progress / 10) - self._update_card(cat_key, data) - - # Find category name for status - cat_name = cat_key - for name, key, _ in CATEGORIES: - if key == cat_key: - cat_name = name - break - self.status_label.configure(text=f"Scanned: {cat_name} ({self.scan_progress}/10)") - - elif msg_type == "scan_complete": - self.scanning = False - self.progress_bar.set(1.0) - self.btn_scan.configure(state="normal", text="▶ Run Full Audit") - - # Count severities - crits = sum(1 for f in self.all_findings if f.severity == "CRITICAL") - warns = sum(1 for f in self.all_findings if f.severity == "WARN") - - self.status_label.configure( - text=f"Complete — {len(self.all_findings)} findings " - f"({crits} critical, {warns} warnings)") - - # Update overall threat - if crits > 0: - self.threat_label.configure(text=" ⚠️ ISSUES FOUND — REVIEW CRITICAL FINDINGS ", - text_color="#dc3545") - self.threat_frame.configure(fg_color="#2a1215") - elif warns > 0: - self.threat_label.configure(text=" ⚡ WARNINGS — REVIEW RECOMMENDED ", - text_color="#ffc107") - self.threat_frame.configure(fg_color="#2a2515") - else: - self.threat_label.configure(text=" ✅ SYSTEM LOOKS CLEAN ", - text_color="#28a745") - self.threat_frame.configure(fg_color="#152a17") - - # Save scan results - self.engine.state["last_scan"] = now_ts() - self.engine.state["last_findings"] = [asdict(f) for f in self.all_findings] - 
self.engine.save_state() - - # Refresh details and baseline diff - self._refresh_details() - self._refresh_baseline_diff() - - if not changed: - pass - - self.after(200, self._ui_tick) - - def _update_card(self, cat_key, findings: List[Finding]): - if cat_key not in self.dash_cards: - return - card = self.dash_cards[cat_key] - - crits = sum(1 for f in findings if f.severity == "CRITICAL") - warns = sum(1 for f in findings if f.severity == "WARN") - infos = sum(1 for f in findings if f.severity == "INFO") - - # Determine status - if crits > 0: - card["badge"].configure(text=" FAIL ", fg_color="#dc3545", text_color="#ffffff") - card["card"].configure(fg_color="#2a1215") - elif warns > 0: - card["badge"].configure(text=" WARN ", fg_color="#ffc107", text_color="#000000") - card["card"].configure(fg_color="#2a2515") - else: - card["badge"].configure(text=" PASS ", fg_color="#28a745", text_color="#ffffff") - card["card"].configure(fg_color="#152a17") - - # Count text - parts = [] - if crits: - parts.append(f"{crits} critical") - if warns: - parts.append(f"{warns} warning{'s' if warns > 1 else ''}") - if infos: - parts.append(f"{infos} info") - card["count"].configure(text=", ".join(parts) if parts else "Clean") - - # Check for NEW findings vs baseline - bl_keys = self.engine.get_baseline_keys() - if bl_keys: - new_count = sum(1 for f in findings if f.key() not in bl_keys) - if new_count > 0: - card["count"].configure( - text=card["count"].cget("text") + f" [NEW: {new_count}]") - - def _refresh_details(self, *_): - # Clear existing - for widget in self.details_scroll.winfo_children(): - widget.destroy() - - # Get filters - cat_filter = self.cat_var.get() - sev_filter = {s for s, v in self.sev_vars.items() if v.get()} - - bl_keys = self.engine.get_baseline_keys() - - # Filter findings - filtered = [] - for f in self.all_findings: - if f.severity not in sev_filter: - continue - if cat_filter != "ALL": - cat_name_match = False - for name, key, _ in CATEGORIES: - if name == 
cat_filter and key == f.category: - cat_name_match = True - break - if not cat_name_match: - continue - filtered.append(f) - - # Sort: CRITICAL first, then WARN, then INFO - sev_order = {"CRITICAL": 0, "WARN": 1, "INFO": 2} - filtered.sort(key=lambda f: sev_order.get(f.severity, 9)) - - self.finding_count_label.configure(text=f"{len(filtered)} findings shown") - - if not filtered: - ctk.CTkLabel(self.details_scroll, - text="No findings match current filters." if self.all_findings else "Run a scan first.", - font=("Segoe UI", 11), text_color="#666666").pack(pady=20) - return - - # Render finding cards - sev_colors = {"CRITICAL": "#dc3545", "WARN": "#ffc107", "INFO": "#6c757d"} - sev_bg = {"CRITICAL": "#2a1215", "WARN": "#2a2515", "INFO": "#1e1e1e"} - - for f in filtered: - is_new = bl_keys and f.key() not in bl_keys - - card = ctk.CTkFrame(self.details_scroll, - fg_color=sev_bg.get(f.severity, "#1e1e1e"), - corner_radius=6) - card.pack(fill="x", padx=4, pady=2) - - # Header row - header = ctk.CTkFrame(card, fg_color="transparent") - header.pack(fill="x", padx=8, pady=(6, 0)) - - ctk.CTkLabel(header, text=f" {f.severity} ", - font=("Segoe UI", 9, "bold"), - fg_color=sev_colors.get(f.severity, "#666"), - text_color="#ffffff" if f.severity != "WARN" else "#000000", - corner_radius=3).pack(side="left", padx=(0, 6)) - - ctk.CTkLabel(header, text=f.title, - font=("Segoe UI", 11, "bold")).pack(side="left") - - if is_new: - ctk.CTkLabel(header, text=" NEW ", - font=("Segoe UI", 8, "bold"), - fg_color="#17a2b8", text_color="#ffffff", - corner_radius=3).pack(side="left", padx=6) - - # Detail - if f.detail: - ctk.CTkLabel(card, text=f.detail, font=("Segoe UI", 9), - text_color="#aaaaaa", justify="left", - wraplength=0).pack(fill="x", padx=12, pady=(2, 0), anchor="w") - - # Remediation - if f.remediation: - ctk.CTkLabel(card, text=f"💡 {f.remediation}", - font=("Segoe UI", 9), text_color="#5bc0de", - justify="left", wraplength=0).pack(fill="x", padx=12, pady=(2, 6), anchor="w") 
- else: - # Small bottom padding - ctk.CTkFrame(card, height=4, fg_color="transparent").pack() - - def _refresh_baseline_diff(self): - # Clear existing diff - for widget in self.diff_scroll.winfo_children(): - widget.destroy() - - bl = self.engine.state.get("baseline") - if not bl: - ctk.CTkLabel(self.diff_scroll, text="No baseline saved.", - font=("Segoe UI", 10), text_color="#666666").pack(pady=10) - return - - bl_keys = set(bl.get("finding_keys", [])) - current_keys = {f.key() for f in self.all_findings} - - new_keys = current_keys - bl_keys - resolved_keys = bl_keys - current_keys - - # Update baseline info - self.bl_info_label.configure( - text=f"Baseline saved: {bl.get('saved_at', 'unknown')}\n" - f"Baseline findings: {len(bl_keys)} | Current: {len(current_keys)}\n" - f"New: {len(new_keys)} | Resolved: {len(resolved_keys)}") - - if not new_keys and not resolved_keys: - ctk.CTkLabel(self.diff_scroll, text="✅ No changes since baseline.", - font=("Segoe UI", 11), text_color="#28a745").pack(pady=10) - return - - if new_keys: - ctk.CTkLabel(self.diff_scroll, text=f"🆕 New Findings ({len(new_keys)})", - font=("Segoe UI", 11, "bold"), text_color="#17a2b8").pack( - anchor="w", padx=6, pady=(6, 2)) - for f in self.all_findings: - if f.key() in new_keys: - row = ctk.CTkFrame(self.diff_scroll, fg_color="#1a2a2e", corner_radius=4) - row.pack(fill="x", padx=8, pady=1) - sev_colors = {"CRITICAL": "#dc3545", "WARN": "#ffc107", "INFO": "#6c757d"} - ctk.CTkLabel(row, text=f" {f.severity} ", - font=("Segoe UI", 8, "bold"), - fg_color=sev_colors.get(f.severity, "#666"), - text_color="#fff", corner_radius=2).pack(side="left", padx=(6, 4), pady=3) - ctk.CTkLabel(row, text=f.title, font=("Segoe UI", 9)).pack(side="left", pady=3) - - if resolved_keys: - ctk.CTkLabel(self.diff_scroll, text=f"✅ Resolved ({len(resolved_keys)})", - font=("Segoe UI", 11, "bold"), text_color="#28a745").pack( - anchor="w", padx=6, pady=(10, 2)) - bl_findings = bl.get("findings", []) - for bf in 
bl_findings: - bkey = hashlib.sha256( - f"{bf['category']}|{bf['title']}|{bf.get('detail','')[:120]}".encode() - ).hexdigest()[:16] - if bkey in resolved_keys: - row = ctk.CTkFrame(self.diff_scroll, fg_color="#1a2e1a", corner_radius=4) - row.pack(fill="x", padx=8, pady=1) - ctk.CTkLabel(row, text=" RESOLVED ", - font=("Segoe UI", 8, "bold"), - fg_color="#28a745", text_color="#fff", - corner_radius=2).pack(side="left", padx=(6, 4), pady=3) - ctk.CTkLabel(row, text=bf.get("title", ""), - font=("Segoe UI", 9)).pack(side="left", pady=3) - - # ── Actions ── - - def save_baseline(self): - if not self.all_findings: - self.status_label.configure(text="Run a scan first before saving baseline") - return - self.engine.save_baseline(self.all_findings) - self.status_label.configure(text=f"Baseline saved with {len(self.all_findings)} findings") - self._refresh_baseline_diff() - - def clear_baseline(self): - self.engine.clear_baseline() - self.status_label.configure(text="Baseline cleared") - self._refresh_baseline_diff() - - def export_report(self): - if not self.all_findings: - self.status_label.configure(text="Run a scan first") - return - - ts = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"security_audit_{ts}.json" - export_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "exports") - os.makedirs(export_dir, exist_ok=True) - filepath = os.path.join(export_dir, filename) - - report = { - "scan_date": now_ts(), - "admin_mode": is_admin(), - "hostname": os.environ.get("COMPUTERNAME", "unknown"), - "username": os.environ.get("USERNAME", "unknown"), - "summary": { - "total": len(self.all_findings), - "critical": sum(1 for f in self.all_findings if f.severity == "CRITICAL"), - "warnings": sum(1 for f in self.all_findings if f.severity == "WARN"), - "info": sum(1 for f in self.all_findings if f.severity == "INFO"), - }, - "findings": [asdict(f) for f in self.all_findings], - } - - try: - with open(filepath, "w") as f: - json.dump(report, f, indent=2, 
default=str) - self.status_label.configure(text=f"Exported: {filepath}") - except Exception as e: - self.status_label.configure(text=f"Export failed: {e}") - - def destroy(self): - self.running = False - super().destroy() - - -# ─────────────────────────── Entry point ─────────────────────────── - -def run_tool(): - root = ctk.CTkToplevel() - app = App(root) - - -if __name__ == "__main__": - ctk.set_appearance_mode("dark") - ctk.set_default_color_theme("blue") - root = ctk.CTk() - root.title("Security Audit") - root.geometry("1200x750") - app = App(root) - root.mainloop() diff --git a/requirements.txt b/requirements.txt index 96271ee..10708ee 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ send2trash>=1.8,<2.0 keyboard>=0.13.5,<1.0 +tzdata>=2024.1 +winotify>=1.1,<2.0; sys_platform == "win32" diff --git a/security/poc/L1_2026-04-17.py b/security/poc/L1_2026-04-17.py new file mode 100644 index 0000000..e021826 --- /dev/null +++ b/security/poc/L1_2026-04-17.py @@ -0,0 +1,150 @@ +""" +L1 — Path traversal in FFmpeg Studio output folder (PoC) +======================================================== + +Vulnerability +------------- +`tools/ffmpeg_studio.py:502,542,552` reads `output_dir_var` (a tkinter +`StringVar` bound to a free-text Entry widget) with no path validation, +then `os.path.join`s a filename onto it and hands the result to +`subprocess.Popen` as an FFmpeg output argument. An attacker who can +reach the UI (local user, RDP, shared session, shoulder-surf-and-paste) +can supply a traversal string like `..\\..\\..\\Windows\\System32` and +cause FFmpeg to drop a `.mp4` in that directory. + +What this PoC does +------------------ +We do NOT touch real system directories. Instead: + + 1. Create a sandbox root %TEMP%\\security_probe_L1\\legit + 2. Create a "victim" dir %TEMP%\\security_probe_L1\\victim + 3. 
Simulate the buggy code path: reproduce the exact + `os.path.join(out_dir, f"gameplay_{ts}.mp4")` idiom from + `ffmpeg_studio.py:542` using the attacker-supplied traversal + string as `out_dir`. No FFmpeg / subprocess is spawned — we + replace Popen with a tiny `open(out_file, "wb").write(...)` + because that is sufficient to demonstrate the write-primitive. + 4. Check whether the resulting file landed inside `victim/`. If so: + the traversal worked end-to-end — exploit succeeded. + +Expected output on success +-------------------------- + [+] Sandbox legit dir: <%TEMP%>\\security_probe_L1\\legit + [+] Sandbox victim dir: <%TEMP%>\\security_probe_L1\\victim + [+] Attacker-supplied out_dir string: ..\\victim + [+] os.path.join gave: <...>\\security_probe_L1\\legit\\..\\victim\\gameplay_<ts>.mp4 + [+] Resolved actual path: <...>\\security_probe_L1\\victim\\gameplay_<ts>.mp4 + [+] File landed in victim/ — traversal was NOT blocked + EXPLOIT SUCCEEDED + +After the fix is applied, re-running this PoC should print +`EXPLOIT BLOCKED` (the validator raises / rejects before the write). + +Safety caveats +-------------- +- No network calls. No real system dirs written. Runs entirely under + %TEMP%. Cleans up its sandbox at the end. +- Uses plain `open()`, NOT subprocess or FFmpeg — we are modelling the + vector, not actually executing the recorder. +- Exits with code 0 on success (prints EXPLOIT SUCCEEDED) and code 1 + on failure (prints EXPLOIT BLOCKED), so CI can just run it and + check the exit status once the fix is in. 
+""" +from __future__ import annotations + +import os +import shutil +import sys +import tempfile +import time +from pathlib import Path + +SANDBOX_ROOT = Path(tempfile.gettempdir()) / "security_probe_L1" + + +def _setup_sandbox() -> tuple[Path, Path]: + """Fresh legit/ and victim/ dirs under %TEMP%.""" + if SANDBOX_ROOT.exists(): + shutil.rmtree(SANDBOX_ROOT, ignore_errors=True) + legit = SANDBOX_ROOT / "legit" + victim = SANDBOX_ROOT / "victim" + legit.mkdir(parents=True, exist_ok=True) + victim.mkdir(parents=True, exist_ok=True) + return legit, victim + + +def _try_validator(candidate: Path, allowed_base: Path) -> bool: + """Mirror of the proposed fix. Returns True if path is inside base. + + We import the real validator if it's available; otherwise this PoC + still runs and prints EXPLOIT SUCCEEDED (pre-fix behaviour).""" + sys.path.insert(0, str(Path(__file__).resolve().parents[2])) + try: + from tools.ffmpeg_studio import _validate_output_dir # type: ignore + except ImportError: + return True # validator not yet installed → vulnerable path + + try: + _validate_output_dir(str(candidate), allowed_bases=[allowed_base]) + return True + except Exception: + return False + + +def main() -> int: + legit, victim = _setup_sandbox() + print(f"[+] Sandbox legit dir: {legit}") + print(f"[+] Sandbox victim dir: {victim}") + + # Attacker pastes this into the Output Folder Entry. The UI code's + # `output_dir_var.get().strip()` returns it unchanged. + attacker_input = "..\\victim" + # Simulate: Chris had just browsed to `legit/` via the file picker, + # then the attacker overwrote the entry. Real code reads the Entry + # once, so effectively `out_dir = legit / attacker_input`. + tainted_out_dir = str(legit / attacker_input) + print(f"[+] Attacker-supplied out_dir string: {attacker_input}") + + # Gate with the (yet-to-exist) validator. If the fix is applied, + # this returns False and we bail out before writing anything. 
+ if not _try_validator(Path(tainted_out_dir), legit): + print("[+] Validator rejected the path — no write attempted") + print("EXPLOIT BLOCKED") + shutil.rmtree(SANDBOX_ROOT, ignore_errors=True) + return 1 + + # ── This is the exact pattern from ffmpeg_studio.py:542 ───────── + timestamp = time.strftime("%Y%m%d_%H%M%S") + out_file = os.path.join(tainted_out_dir, f"gameplay_{timestamp}.mp4") + print(f"[+] os.path.join gave: {out_file}") + + # Stand-in for subprocess.Popen(["ffmpeg", ..., out_file]). FFmpeg + # would open this path for writing; we do the same with plain open. + try: + with open(out_file, "wb") as fh: + fh.write(b"FAKE_MP4_PAYLOAD") + except OSError as e: + print(f"[-] Write failed ({e}) — exploit did not land") + shutil.rmtree(SANDBOX_ROOT, ignore_errors=True) + return 1 + + resolved = Path(out_file).resolve() + print(f"[+] Resolved actual path: {resolved}") + + victim_resolved = victim.resolve() + try: + resolved.relative_to(victim_resolved) + print("[+] File landed in victim/ — traversal was NOT blocked") + print("EXPLOIT SUCCEEDED") + rc = 0 + except ValueError: + print("[+] File stayed in legit/ — traversal did not escape") + print("EXPLOIT BLOCKED") + rc = 1 + + shutil.rmtree(SANDBOX_ROOT, ignore_errors=True) + return rc + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/security/poc/L2_2026-04-17.py b/security/poc/L2_2026-04-17.py new file mode 100644 index 0000000..7c965ce --- /dev/null +++ b/security/poc/L2_2026-04-17.py @@ -0,0 +1,126 @@ +""" +PoC: L2 — Path traversal in FFmpeg Studio screenshot capture output. + +What this demonstrates +---------------------- +`tools/ffmpeg_studio.py` lines 1285/1294/1329 read the screenshot output +folder from a tkinter Entry (`cap_out_var`) and feed it straight into +`subprocess.run(["ffmpeg", ..., out_file])` with no validation. 
A user who +types `../../Windows/Temp` (or any traversal string) into the "Output +Folder" field for screenshot capture lands files outside the intended +`~/Pictures` sandbox. Same root cause as L1 but on the Screenshot tab. + +This PoC simulates the vulnerable code path *without* touching the real +GUI or real FFmpeg. It re-implements the 3 vulnerable lines +(`cap_out_var.get().strip()` → `os.path.join` → `subprocess.run`) and +drives them with an attacker-controlled traversal string. If the traversal +lands a file outside the intended sandbox, the exploit is considered +successful. + +Safety +------ +- Loopback/local only — no network, no external sockets. +- Sandbox is a fresh dir under `%TEMP%`; "outside" target is also under + `%TEMP%` (sibling dir). No writes to `C:\\Windows\\Temp` or anywhere + privileged. +- Uses a stub "ffmpeg" call (`cmd /c echo` on Windows, `true` elsewhere) + so no real encoder runs. We then manually touch the output path to + prove `os.path.join` resolved outside the sandbox. +- Idempotent: cleans up its own temp dirs on exit. + +Expected output +--------------- +- Unpatched: "EXPLOIT SUCCEEDED — wrote file outside sandbox: " +- Patched: "EXPLOIT BLOCKED — validator rejected traversal input" + +Exit codes +---------- +- 0 = exploit succeeded (vulnerability present) +- 1 = exploit blocked (patch working) +- 2 = harness error (unexpected state, not a security result) +""" + +from __future__ import annotations + +import os +import shutil +import subprocess +import sys +import tempfile +import time +from pathlib import Path + + +def _try_import_validator(): + """Return the fix's validator if it exists, else None (unpatched).""" + try: + # The fix in Sub-task B adds `_validate_output_dir` to ffmpeg_studio. 
+ sys.path.insert(0, str(Path(__file__).resolve().parents[2])) + from tools.ffmpeg_studio import _validate_output_dir # type: ignore + return _validate_output_dir + except Exception: + return None + + +def _simulate_vulnerable_capture(cap_out_var_value: str, sandbox: Path) -> Path: + """Mirror the 3 vulnerable lines from ffmpeg_studio.py:1285/1294/1329. + + Returns the resolved output path the vulnerable code would have + handed to subprocess.run. + """ + out_d = cap_out_var_value.strip() # line 1285 + timestamp = time.strftime("%Y%m%d_%H%M%S") + ext = "png" + out_file = os.path.join(out_d, f"capture_{timestamp}.{ext}") # line 1294 + + # Stub "ffmpeg" — proves the arg flows into subprocess without + # actually encoding. On Windows: cmd /c echo; elsewhere: true. + if os.name == "nt": + stub = ["cmd", "/c", "echo", out_file] + else: + stub = ["true", out_file] + + # line 1329 — the args list contains out_file, unvalidated. + subprocess.run(stub, capture_output=True) + + # Real ffmpeg would have written to out_file. We touch it manually + # so the harness can observe the path resolution outcome. + resolved = Path(out_file).resolve() + resolved.parent.mkdir(parents=True, exist_ok=True) + resolved.write_bytes(b"PoC artifact - L2 path traversal simulation") + return resolved + + +def main() -> int: + base = Path(tempfile.mkdtemp(prefix="l2_poc_")) + sandbox = base / "sandbox_pictures" # mimics ~/Pictures + outside = base / "outside_victim" # mimics ../../Windows/Temp + sandbox.mkdir() + outside.mkdir() + + # Attacker payload — traversal from sandbox into sibling dir. 
+ malicious = f"{sandbox}{os.sep}..{os.sep}outside_victim" + + validator = _try_import_validator() + try: + if validator is not None: + try: + validator(malicious, allowed_bases=[Path(sandbox)]) + except (ValueError, PermissionError) as e: + print(f"EXPLOIT BLOCKED — validator rejected traversal input: {e}") + return 1 + + written = _simulate_vulnerable_capture(malicious, sandbox) + try: + written.relative_to(sandbox.resolve()) + print(f"EXPLOIT BLOCKED — file stayed inside sandbox: {written}") + return 1 + except ValueError: + print(f"EXPLOIT SUCCEEDED — wrote file outside sandbox: {written}") + return 0 + finally: + shutil.rmtree(base, ignore_errors=True) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/tests/test_account_activity_monitor.py b/tests/test_account_activity_monitor.py new file mode 100644 index 0000000..5fb479c --- /dev/null +++ b/tests/test_account_activity_monitor.py @@ -0,0 +1,94 @@ +"""Tests for account_activity_monitor filter-snapshot thread safety. + +Covers A4: _active_categories / _active_severities are now read via +_snapshot_filters() under an RLock so UI toggle writes cannot race with +after()-callback or worker-thread reads. +""" + +import sys +import threading +from pathlib import Path + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools.account_activity_monitor import App + + +def _make_bare_app() -> App: + """Instantiate App without running __init__ (no Tk root required). + + Wires up only the attributes _snapshot_filters touches. 
+ """ + app = App.__new__(App) + app._active_categories = {f"CAT_{i}" for i in range(20)} + app._active_severities = {"CRITICAL", "WARNING", "INFO"} + app._filter_lock = threading.RLock() + return app + + +def test_snapshot_filters_returns_frozensets(): + app = _make_bare_app() + cats, sevs = app._snapshot_filters() + assert isinstance(cats, frozenset) + assert isinstance(sevs, frozenset) + assert cats == app._active_categories + assert sevs == app._active_severities + + +def test_snapshot_is_isolated_from_later_mutation(): + app = _make_bare_app() + cats, sevs = app._snapshot_filters() + with app._filter_lock: + app._active_categories.add("NEW_CAT") + app._active_severities.discard("INFO") + assert "NEW_CAT" not in cats + assert "INFO" in sevs + + +def test_snapshot_survives_concurrent_mutation(): + """Reader snapshots while writer mutates — no RuntimeError, no partial reads.""" + app = _make_bare_app() + stop = threading.Event() + errors: list = [] + + def writer(): + toggle = True + while not stop.is_set(): + with app._filter_lock: + if toggle: + app._active_categories.add("RACE_CAT") + app._active_severities.discard("INFO") + else: + app._active_categories.discard("RACE_CAT") + app._active_severities.add("INFO") + toggle = not toggle + + def reader(): + try: + for _ in range(5000): + cats, sevs = app._snapshot_filters() + # Force iteration to surface any copy-mid-mutation corruption. 
+ _ = sum(1 for _ in cats) + sum(1 for _ in sevs) + except Exception as e: # pragma: no cover — only fires on regression + errors.append(e) + + w = threading.Thread(target=writer, daemon=True) + r = threading.Thread(target=reader) + w.start() + r.start() + r.join(timeout=10) + stop.set() + w.join(timeout=2) + assert not errors, f"Concurrent snapshot raised: {errors}" + + +def test_snapshot_result_is_immutable(): + """Returned frozensets must reject mutation — no accidental shared state.""" + app = _make_bare_app() + cats, sevs = app._snapshot_filters() + import pytest + with pytest.raises(AttributeError): + cats.add("x") # type: ignore[attr-defined] + with pytest.raises(AttributeError): + sevs.discard("INFO") # type: ignore[attr-defined] diff --git a/tests/test_claude_usage_monitor.py b/tests/test_claude_usage_monitor.py index 7c13252..ff7317f 100644 --- a/tests/test_claude_usage_monitor.py +++ b/tests/test_claude_usage_monitor.py @@ -376,3 +376,283 @@ def test_versioned_suffix_matches_longest_key(self): def test_empty_model_returns_default(self): assert _get_pricing("") is not None assert _get_pricing("") == _get_pricing("claude-sonnet-4-6") + + +# ============================================================================= +# T1 — mtime-cache for JSONL parsing +# ============================================================================= + +from tools.claude_usage_monitor import ( # noqa: E402 + _parse_session_file_cached, + _SESSION_CACHE, +) + + +class TestSessionMtimeCache: + """_parse_session_file_cached must skip re-parse on unchanged mtime.""" + + def _write_jsonl(self, path: Path) -> None: + # Minimal assistant turn so _parse_session_file produces a session dict. 
+ line = { + "type": "assistant", + "timestamp": "2026-04-20T10:00:00Z", + "sessionId": "sid-mtime", + "cwd": str(path.parent), + "message": { + "model": "claude-sonnet-4-6", + "usage": { + "input_tokens": 100, "output_tokens": 50, + "cache_read_input_tokens": 0, + "cache_creation_input_tokens": 0, + }, + }, + } + import json as _json + path.write_text(_json.dumps(line) + "\n", encoding="utf-8") + + def test_cache_hit_returns_same_object(self, tmp_path): + _SESSION_CACHE.clear() + f = tmp_path / "s.jsonl" + self._write_jsonl(f) + first = _parse_session_file_cached(str(f)) + second = _parse_session_file_cached(str(f)) + assert first is second # cached dict reused by identity + + def test_cache_invalidates_on_mtime_change(self, tmp_path): + import os as _os, time as _time + _SESSION_CACHE.clear() + f = tmp_path / "s.jsonl" + self._write_jsonl(f) + first = _parse_session_file_cached(str(f)) + # Bump mtime by 2s to guarantee filesystem granularity picks it up. + new_mtime = _os.path.getmtime(f) + 2 + _os.utime(f, (new_mtime, new_mtime)) + second = _parse_session_file_cached(str(f)) + assert first is not second # re-parsed, fresh object + + def test_missing_file_falls_through_without_caching(self, tmp_path): + _SESSION_CACHE.clear() + missing = tmp_path / "nope.jsonl" + result = _parse_session_file_cached(str(missing)) + # Missing files produce a session dict with 0 turns, not a crash. 
+ assert result["assistant_turns"] == 0 + assert str(missing) not in _SESSION_CACHE + + +# ============================================================================= +# T4 — time-window filter bucketing +# ============================================================================= + +from tools.claude_usage_monitor import ClaudeUsageMonitor # noqa: E402 + + +class TestWindowFilter: + """_session_in_window must bucket sessions against Today/Week/Month cutoffs.""" + + def _monitor(self, window: str): + obj = object.__new__(ClaudeUsageMonitor) + obj._window = window + return obj + + def _session(self, ts: datetime) -> dict: + return {"last_timestamp": ts.isoformat().replace("+00:00", "Z")} + + def test_all_passes_everything(self): + m = self._monitor("All") + ancient = self._session(datetime(2000, 1, 1, tzinfo=timezone.utc)) + assert m._session_in_window(ancient) is True + + def test_today_excludes_yesterday(self): + m = self._monitor("Today") + now = datetime.now(timezone.utc) + yesterday = self._session(now - timedelta(days=1)) + assert m._session_in_window(yesterday) is False + + def test_today_includes_current_hour(self): + m = self._monitor("Today") + now = datetime.now(timezone.utc) + assert m._session_in_window(self._session(now)) is True + + def test_week_bucketing(self): + m = self._monitor("Week") + now = datetime.now(timezone.utc) + assert m._session_in_window(self._session(now - timedelta(days=3))) is True + assert m._session_in_window(self._session(now - timedelta(days=8))) is False + + def test_month_bucketing(self): + m = self._monitor("Month") + now = datetime.now(timezone.utc) + assert m._session_in_window(self._session(now - timedelta(days=20))) is True + assert m._session_in_window(self._session(now - timedelta(days=31))) is False + + def test_missing_timestamp_excluded_when_filtered(self): + m = self._monitor("Today") + assert m._session_in_window({}) is False + + def test_missing_timestamp_included_on_all(self): + m = self._monitor("All") + # 
'All' short-circuits before timestamp parsing. + assert m._session_in_window({}) is True + + +# ============================================================================= +# T10 — notification dedup state +# ============================================================================= + +from tools.claude_usage_monitor import ( # noqa: E402 + _rotate_level, + _should_notify, + _record_notified, + _evict_inactive_notifs, +) + + +class TestRotateLevel: + def test_below_amber_is_none(self): + assert _rotate_level(0) == "none" + assert _rotate_level(29.9) == "none" + + def test_amber_threshold(self): + assert _rotate_level(30) == "amber" + assert _rotate_level(59.9) == "amber" + + def test_red_threshold(self): + assert _rotate_level(60) == "red" + assert _rotate_level(100) == "red" + + +class TestShouldNotify: + """Fires only on tier upgrades; downgrades stay silent.""" + + def test_first_alert_fires(self): + state: dict = {} + assert _should_notify(state, "s1", "amber") is True + assert _should_notify(state, "s1", "red") is True + + def test_none_level_never_fires(self): + assert _should_notify({}, "s1", "none") is False + + def test_same_level_no_refire(self): + state = {"s1": {"level": "amber", "fired_at": "x"}} + assert _should_notify(state, "s1", "amber") is False + + def test_upgrade_amber_to_red_fires(self): + state = {"s1": {"level": "amber", "fired_at": "x"}} + assert _should_notify(state, "s1", "red") is True + + def test_downgrade_red_to_amber_is_silent(self): + state = {"s1": {"level": "red", "fired_at": "x"}} + assert _should_notify(state, "s1", "amber") is False + + def test_downgrade_to_none_is_silent(self): + state = {"s1": {"level": "red", "fired_at": "x"}} + assert _should_notify(state, "s1", "none") is False + + def test_independent_sessions_tracked_separately(self): + state = {"s1": {"level": "red", "fired_at": "x"}} + assert _should_notify(state, "s2", "amber") is True + + +class TestRecordNotified: + def 
test_record_stamps_level_and_timestamp(self): + state: dict = {} + _record_notified(state, "s1", "amber") + assert state["s1"]["level"] == "amber" + assert "fired_at" in state["s1"] + + def test_record_overwrites_prior_entry(self): + state: dict = {} + _record_notified(state, "s1", "amber") + _record_notified(state, "s1", "red") + assert state["s1"]["level"] == "red" + + +# ============================================================================= +# Cost composition + turn stats (Session Detail) +# ============================================================================= + +from tools.claude_usage_monitor import ( # noqa: E402 + _cost_composition, + _turn_cost_stats, +) + + +class TestCostComposition: + """_cost_composition splits session cost into four token-type buckets.""" + + def test_empty_session_returns_zero_total(self): + comp = _cost_composition([]) + assert comp == {"input": 0.0, "output": 0.0, "cache_read": 0.0, "cache_write": 0.0, "total": 0.0} + + def test_single_turn_sonnet_46_pricing(self): + # 1M input, 1M output, 1M cache_read, 1M cache_write (5m rate) + # sonnet-4-6: 3 + 15 + 0.30 + 3.75 = $22.05 + turns = [("t", 0.0, 1_000_000, 1_000_000, 1_000_000, 1_000_000, "claude-sonnet-4-6")] + comp = _cost_composition(turns) + assert comp["input"] == 3.0 + assert comp["output"] == 15.0 + assert comp["cache_read"] == 0.30 + assert comp["cache_write"] == 3.75 + assert abs(comp["total"] - 22.05) < 1e-6 + + def test_multi_model_sums_per_turn(self): + turns = [ + ("t", 0.0, 1_000_000, 0, 0, 0, "claude-sonnet-4-6"), # 3.0 input + ("t", 0.0, 1_000_000, 0, 0, 0, "claude-opus-4-6"), # 5.0 input + ] + comp = _cost_composition(turns) + assert comp["input"] == 8.0 + assert comp["output"] == 0.0 + + def test_unknown_model_uses_default_pricing(self): + turns = [("t", 0.0, 1_000_000, 0, 0, 0, "claude-unknown-99")] + comp = _cost_composition(turns) + # Default falls back to sonnet-4-6 -> $3 for 1M input + assert comp["input"] == 3.0 + + +class TestTurnCostStats: + 
def test_none_when_no_turns(self): + assert _turn_cost_stats([]) is None + + def test_single_turn_all_same(self): + turns = [("t", 0.75, 0, 0, 0, 0, "m")] + stats = _turn_cost_stats(turns) + assert stats == {"first": 0.75, "last": 0.75, "avg": 0.75, "peak": 0.75, "n": 1} + + def test_first_last_avg_peak(self): + turns = [ + ("t", 0.1, 0, 0, 0, 0, "m"), + ("t", 0.5, 0, 0, 0, 0, "m"), + ("t", 0.2, 0, 0, 0, 0, "m"), + ] + stats = _turn_cost_stats(turns) + assert stats["first"] == 0.1 + assert stats["last"] == 0.2 + assert stats["peak"] == 0.5 + assert abs(stats["avg"] - 0.2666) < 0.001 + assert stats["n"] == 3 + + +class TestEvictInactiveNotifs: + def test_keeps_live_sessions(self): + state = { + "s1": {"level": "red", "fired_at": "x"}, + "s2": {"level": "amber", "fired_at": "y"}, + } + _evict_inactive_notifs(state, {"s1", "s2"}) + assert set(state) == {"s1", "s2"} + + def test_drops_sessions_no_longer_live(self): + state = { + "s1": {"level": "red", "fired_at": "x"}, + "s2": {"level": "amber", "fired_at": "y"}, + } + _evict_inactive_notifs(state, {"s1"}) + assert set(state) == {"s1"} + + def test_mutates_in_place_and_returns_same_dict(self): + state = {"s1": {"level": "red", "fired_at": "x"}} + returned = _evict_inactive_notifs(state, set()) + assert returned is state + assert state == {} diff --git a/tests/test_common_config.py b/tests/test_common_config.py new file mode 100644 index 0000000..36927e8 --- /dev/null +++ b/tests/test_common_config.py @@ -0,0 +1,171 @@ +"""Unit tests for ``tools._common.config``.""" + +from __future__ import annotations + +import importlib +from pathlib import Path + +import pytest + +from tools._common import config as cfg + + +@pytest.fixture(autouse=True) +def _skip_dotenv_load(monkeypatch: pytest.MonkeyPatch) -> None: + """Skip real ``.env`` loading unless a test opts in.""" + monkeypatch.setattr(cfg, "_DOTENV_LOADED", True) + + +class TestGetConfig: + def test_env_var_returned_when_set( + self, monkeypatch: pytest.MonkeyPatch + ) 
-> None: + monkeypatch.setenv("AUTOMATIONS_TEST_KEY", "hello") + assert cfg.get_config("AUTOMATIONS_TEST_KEY") == "hello" + + def test_default_returned_when_unset( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.delenv("AUTOMATIONS_TEST_KEY", raising=False) + assert cfg.get_config("AUTOMATIONS_TEST_KEY", "fallback") == "fallback" + + def test_none_returned_when_unset_and_no_default( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.delenv("AUTOMATIONS_MISSING", raising=False) + assert cfg.get_config("AUTOMATIONS_MISSING") is None + + def test_empty_env_value_is_returned_verbatim( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.setenv("AUTOMATIONS_EMPTY", "") + assert cfg.get_config("AUTOMATIONS_EMPTY", "fallback") == "" + + +class TestGetBool: + @pytest.mark.parametrize("raw", ["1", "true", "TRUE", "yes", "on", "y", "t"]) + def test_truthy_tokens( + self, monkeypatch: pytest.MonkeyPatch, raw: str + ) -> None: + monkeypatch.setenv("AUTOMATIONS_FLAG", raw) + assert cfg.get_bool("AUTOMATIONS_FLAG") is True + + @pytest.mark.parametrize("raw", ["0", "false", "no", "off", "", "F"]) + def test_falsy_tokens( + self, monkeypatch: pytest.MonkeyPatch, raw: str + ) -> None: + monkeypatch.setenv("AUTOMATIONS_FLAG", raw) + assert cfg.get_bool("AUTOMATIONS_FLAG", default=True) is False + + def test_unset_returns_default( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.delenv("AUTOMATIONS_FLAG", raising=False) + assert cfg.get_bool("AUTOMATIONS_FLAG", default=True) is True + assert cfg.get_bool("AUTOMATIONS_FLAG", default=False) is False + + def test_unknown_value_falls_back_to_default( + self, + monkeypatch: pytest.MonkeyPatch, + caplog: pytest.LogCaptureFixture, + ) -> None: + monkeypatch.setenv("AUTOMATIONS_FLAG", "maybe") + with caplog.at_level("WARNING", logger=cfg.__name__): + assert cfg.get_bool("AUTOMATIONS_FLAG", default=True) is True + assert any("not a recognized bool" in r.message for r in 
caplog.records) + + +class TestGetPath: + def test_env_value_expands_tilde( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.setenv("AUTOMATIONS_DIR", "~/custom") + result = cfg.get_path("AUTOMATIONS_DIR", default="/unused") + assert result == Path("~/custom").expanduser() + + def test_default_as_string_expands( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + monkeypatch.delenv("AUTOMATIONS_DIR", raising=False) + result = cfg.get_path("AUTOMATIONS_DIR", default="~/videos") + assert result == Path("~/videos").expanduser() + + def test_default_as_path_returned_unchanged( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + monkeypatch.delenv("AUTOMATIONS_DIR", raising=False) + result = cfg.get_path("AUTOMATIONS_DIR", default=tmp_path) + assert result == tmp_path + + +class TestDotenvLoading: + """Exercise the one-shot ``.env`` loader in isolation.""" + + def test_dotenv_file_loaded_when_present( + self, + monkeypatch: pytest.MonkeyPatch, + tmp_path: Path, + ) -> None: + pytest.importorskip("dotenv") + + env_file = tmp_path / ".env" + env_file.write_text("AUTOMATIONS_FROM_DOTENV=via_file\n", encoding="utf-8") + + monkeypatch.setattr(cfg, "REPO_ROOT", tmp_path) + monkeypatch.setattr(cfg, "_DOTENV_LOADED", False) + monkeypatch.delenv("AUTOMATIONS_FROM_DOTENV", raising=False) + + assert cfg.get_config("AUTOMATIONS_FROM_DOTENV") == "via_file" + + def test_real_env_beats_dotenv( + self, + monkeypatch: pytest.MonkeyPatch, + tmp_path: Path, + ) -> None: + pytest.importorskip("dotenv") + + env_file = tmp_path / ".env" + env_file.write_text("AUTOMATIONS_WINS=from_dotenv\n", encoding="utf-8") + + monkeypatch.setattr(cfg, "REPO_ROOT", tmp_path) + monkeypatch.setattr(cfg, "_DOTENV_LOADED", False) + monkeypatch.setenv("AUTOMATIONS_WINS", "from_env") + + assert cfg.get_config("AUTOMATIONS_WINS") == "from_env" + + def test_missing_dotenv_file_is_noop( + self, + monkeypatch: pytest.MonkeyPatch, + tmp_path: Path, + ) -> None: + 
monkeypatch.setattr(cfg, "REPO_ROOT", tmp_path) + monkeypatch.setattr(cfg, "_DOTENV_LOADED", False) + monkeypatch.delenv("AUTOMATIONS_NONE", raising=False) + + assert cfg.get_config("AUTOMATIONS_NONE", "fallback") == "fallback" + + def test_load_is_idempotent( + self, + monkeypatch: pytest.MonkeyPatch, + tmp_path: Path, + ) -> None: + call_count = {"n": 0} + + def fake_loader(*args: object, **kwargs: object) -> bool: + call_count["n"] += 1 + return True + + fake_dotenv = importlib.util.module_from_spec( + importlib.util.spec_from_loader("dotenv", loader=None) # type: ignore[arg-type] + ) + fake_dotenv.load_dotenv = fake_loader # type: ignore[attr-defined] + + monkeypatch.setitem(__import__("sys").modules, "dotenv", fake_dotenv) + monkeypatch.setattr(cfg, "REPO_ROOT", tmp_path) + monkeypatch.setattr(cfg, "_DOTENV_LOADED", False) + (tmp_path / ".env").write_text("X=1", encoding="utf-8") + + cfg.get_config("X") + cfg.get_config("X") + cfg.get_config("X") + assert call_count["n"] == 1 diff --git a/tests/test_common_paths.py b/tests/test_common_paths.py new file mode 100644 index 0000000..0bff734 --- /dev/null +++ b/tests/test_common_paths.py @@ -0,0 +1,64 @@ +"""Tests for tools._common.paths — repo-layout constants.""" + +import sys +from pathlib import Path + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools._common.paths import ( # noqa: E402 + AUDITS_DIR, + COMMON_DIR, + PLANS_DIR, + PORTABLE_DIR, + REPO_ROOT, + TESTS_DIR, + TOOLS_DIR, +) + + +# ── Derivation sanity ──────────────────────────────────────────────── + +def test_repo_root_points_at_project_root(): + """REPO_ROOT matches the conftest-level project root.""" + assert REPO_ROOT == PROJECT_ROOT + + +def test_repo_root_contains_expected_markers(): + """CLAUDE.md and the tools package live at the repo root.""" + assert (REPO_ROOT / "CLAUDE.md").is_file() + assert (REPO_ROOT / "tools").is_dir() + + +def test_all_constants_are_absolute_paths(): + """Every 
exported constant is an absolute Path (no relative leakage).""" + for p in ( + AUDITS_DIR, + COMMON_DIR, + PLANS_DIR, + PORTABLE_DIR, + REPO_ROOT, + TESTS_DIR, + TOOLS_DIR, + ): + assert isinstance(p, Path) + assert p.is_absolute() + + +def test_directory_relationships(): + """Nested constants resolve inside their parent constants.""" + assert TOOLS_DIR.parent == REPO_ROOT + assert COMMON_DIR.parent == TOOLS_DIR + assert TESTS_DIR.parent == REPO_ROOT + assert PLANS_DIR.parent == REPO_ROOT + assert AUDITS_DIR.parent == REPO_ROOT + assert PORTABLE_DIR.parent == REPO_ROOT + + +def test_existing_dirs_are_present_on_disk(): + """Dirs we know exist today pass is_dir(). Plans/audits may be + user-local, so we don't require them.""" + assert TOOLS_DIR.is_dir() + assert COMMON_DIR.is_dir() + assert TESTS_DIR.is_dir() + assert PORTABLE_DIR.is_dir() diff --git a/tests/test_common_subprocess.py b/tests/test_common_subprocess.py new file mode 100644 index 0000000..71e617a --- /dev/null +++ b/tests/test_common_subprocess.py @@ -0,0 +1,113 @@ +"""Tests for tools._common.subprocess — hidden-window helpers. + +Uses monkeypatch over the stdlib to assert flag merging without +spawning real processes (keeps the suite fast and sandbox-friendly). 
+""" + +import subprocess as _stdlib_subprocess +import sys +from pathlib import Path +from types import SimpleNamespace + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools._common import subprocess as common_subprocess # noqa: E402 +from tools._common.subprocess import ( # noqa: E402 + CREATE_NO_WINDOW, + popen_hidden, + run_hidden, +) + + +# ── CREATE_NO_WINDOW constant ───────────────────────────────────────── + +def test_create_no_window_matches_winapi_on_windows(): + """On Windows the constant equals the documented WinAPI value.""" + if sys.platform == "win32": + assert CREATE_NO_WINDOW == 0x08000000 + else: + assert CREATE_NO_WINDOW == 0 + + +# ── run_hidden flag merging ─────────────────────────────────────────── + +def test_run_hidden_sets_create_no_window(monkeypatch): + """run_hidden OR-merges CREATE_NO_WINDOW into creationflags.""" + captured = {} + + def fake_run(cmd, **kwargs): + captured["cmd"] = cmd + captured["kwargs"] = kwargs + return SimpleNamespace(returncode=0, stdout=b"", stderr=b"") + + monkeypatch.setattr(common_subprocess._stdlib_subprocess, "run", fake_run) + + run_hidden(["echo", "hi"]) + + assert captured["cmd"] == ["echo", "hi"] + assert captured["kwargs"]["creationflags"] == CREATE_NO_WINDOW + + +def test_run_hidden_merges_caller_flags(monkeypatch): + """Caller-supplied creationflags are preserved alongside CREATE_NO_WINDOW.""" + captured = {} + + def fake_run(cmd, **kwargs): + captured["kwargs"] = kwargs + return SimpleNamespace(returncode=0) + + monkeypatch.setattr(common_subprocess._stdlib_subprocess, "run", fake_run) + + # CREATE_NEW_PROCESS_GROUP is 0x00000200 — unrelated to CREATE_NO_WINDOW. 
+ extra = 0x00000200 + run_hidden(["echo"], creationflags=extra) + + flags = captured["kwargs"]["creationflags"] + assert flags & CREATE_NO_WINDOW == CREATE_NO_WINDOW + assert flags & extra == extra + + +def test_run_hidden_passes_through_other_kwargs(monkeypatch): + """Non-creationflags kwargs (timeout, capture_output...) reach stdlib.""" + captured = {} + + def fake_run(cmd, **kwargs): + captured["kwargs"] = kwargs + return SimpleNamespace(returncode=0) + + monkeypatch.setattr(common_subprocess._stdlib_subprocess, "run", fake_run) + + run_hidden(["x"], timeout=5, capture_output=True, text=True) + + assert captured["kwargs"]["timeout"] == 5 + assert captured["kwargs"]["capture_output"] is True + assert captured["kwargs"]["text"] is True + + +# ── popen_hidden mirrors run_hidden ─────────────────────────────────── + +def test_popen_hidden_sets_create_no_window(monkeypatch): + """popen_hidden applies the same creationflags merge as run_hidden.""" + captured = {} + + class FakePopen: + def __init__(self, cmd, **kwargs): + captured["cmd"] = cmd + captured["kwargs"] = kwargs + + monkeypatch.setattr(common_subprocess._stdlib_subprocess, "Popen", FakePopen) + + popen_hidden(["echo"]) + + assert captured["cmd"] == ["echo"] + assert captured["kwargs"]["creationflags"] == CREATE_NO_WINDOW + + +# ── stdlib subprocess remains the stdlib one ───────────────────────── + +def test_stdlib_subprocess_still_resolves_from_top_level(): + """Importing tools._common.subprocess must not clobber the stdlib + module for other callers (absolute-imports sanity check).""" + assert hasattr(_stdlib_subprocess, "run") + assert hasattr(_stdlib_subprocess, "Popen") diff --git a/tests/test_common_threadsafe.py b/tests/test_common_threadsafe.py index 3da3b3b..26bf0b1 100644 --- a/tests/test_common_threadsafe.py +++ b/tests/test_common_threadsafe.py @@ -222,7 +222,7 @@ def test_bounded_deque_no_runtime_error_during_slow_iteration(): raise zero exceptions. 
This test also covers the self.incidents migration in - tools/NETWORK STABILITY MONITOR.py (see A5 commit). + tools/network_stability_monitor.py (see A5 commit). """ APPEND_COUNT = 2_000 bd = BoundedDeque(maxlen=500) diff --git a/tests/test_decision_dice.py b/tests/test_decision_dice.py new file mode 100644 index 0000000..a18ee1b --- /dev/null +++ b/tests/test_decision_dice.py @@ -0,0 +1,165 @@ +"""Tests for tools/decision_dice.py — pure helpers + JSON persistence (Plan B / B4). + +Covers: + * Color helpers: hex_to_rgb, rgb_to_hex (with clamp), lerp_color. + * build_pool: weighted sampling list + zero-total fallback. + * load_profiles / save_profiles / load_journal / save_journal: + missing-file, happy roundtrip, corrupt-JSON, and the journal cap. + +No Tk/CTk instantiation — only import-level side-effects are exercised. +""" + +from __future__ import annotations + +import json +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools import decision_dice as dd # noqa: E402 + + +# ── Color helpers ──────────────────────────────────────────────────── + +def test_hex_to_rgb_accepts_with_and_without_hash(): + assert dd.hex_to_rgb("#ff0000") == (255, 0, 0) + assert dd.hex_to_rgb("00ff00") == (0, 255, 0) + assert dd.hex_to_rgb("#0000ff") == (0, 0, 255) + + +def test_rgb_to_hex_clamps_out_of_range_values(): + assert dd.rgb_to_hex(300, -10, 128) == "#ff0080" + assert dd.rgb_to_hex(0, 0, 0) == "#000000" + assert dd.rgb_to_hex(255, 255, 255) == "#ffffff" + + +def test_rgb_to_hex_floors_floats(): + assert dd.rgb_to_hex(127.9, 127.1, 127.5) == "#7f7f7f" + + +def test_lerp_color_endpoints_and_midpoint(): + assert dd.lerp_color("#000000", "#ffffff", 0.0) == "#000000" + assert dd.lerp_color("#000000", "#ffffff", 1.0) == "#ffffff" + mid = dd.lerp_color("#000000", "#ffffff", 0.5) + # Midpoint rounds down via int(); 127 is expected. 
+ assert mid == "#7f7f7f" + + +# ── build_pool ─────────────────────────────────────────────────────── + +def test_build_pool_expands_by_weight(): + outcomes = [ + {"label": "A", "weight": 3}, + {"label": "B", "weight": 1}, + ] + pool = dd.build_pool(outcomes) + labels = [o["label"] for o in pool] + assert labels.count("A") == 3 + assert labels.count("B") == 1 + assert len(pool) == 4 + + +def test_build_pool_all_zero_weights_falls_back_to_first(): + outcomes = [ + {"label": "A", "weight": 0}, + {"label": "B", "weight": 0}, + ] + pool = dd.build_pool(outcomes) + assert pool == [outcomes[0]] + + +def test_build_pool_treats_negative_weight_as_zero(): + outcomes = [ + {"label": "A", "weight": -5}, + {"label": "B", "weight": 2}, + ] + pool = dd.build_pool(outcomes) + assert [o["label"] for o in pool] == ["B", "B"] + + +# ── Profile persistence ────────────────────────────────────────────── + +def test_load_profiles_returns_defaults_when_file_missing(tmp_path, monkeypatch): + monkeypatch.setattr(dd, "PROFILES_PATH", str(tmp_path / "missing.json")) + profiles = dd.load_profiles() + assert profiles == dict(dd.DEFAULT_PROFILES) + # Returned dict must be independent of the module-level constant. 
+ profiles["new"] = [1, 2, 3, 4] + assert "new" not in dd.DEFAULT_PROFILES + + +def test_save_and_load_profiles_roundtrip(tmp_path, monkeypatch): + path = tmp_path / "profiles.json" + monkeypatch.setattr(dd, "PROFILES_PATH", str(path)) + payload = {"Custom": [10, 20, 30, 40]} + dd.save_profiles(payload) + assert path.is_file() + assert dd.load_profiles() == payload + + +def test_load_profiles_returns_defaults_on_corrupt_json(tmp_path, monkeypatch): + path = tmp_path / "profiles.json" + path.write_text("{not valid json", encoding="utf-8") + monkeypatch.setattr(dd, "PROFILES_PATH", str(path)) + assert dd.load_profiles() == dict(dd.DEFAULT_PROFILES) + + +# ── Journal persistence ────────────────────────────────────────────── + +def test_load_journal_returns_empty_when_file_missing(tmp_path, monkeypatch): + monkeypatch.setattr(dd, "JOURNAL_PATH", str(tmp_path / "missing.json")) + assert dd.load_journal() == [] + + +def test_save_journal_caps_history_at_500_entries(tmp_path, monkeypatch): + path = tmp_path / "journal.json" + monkeypatch.setattr(dd, "JOURNAL_PATH", str(path)) + entries = [{"i": i} for i in range(750)] + dd.save_journal(entries) + persisted = json.loads(path.read_text(encoding="utf-8")) + assert len(persisted) == 500 + # Cap keeps the tail, not the head. + assert persisted[0] == {"i": 250} + assert persisted[-1] == {"i": 749} + + +def test_load_journal_returns_empty_on_corrupt_json(tmp_path, monkeypatch): + path = tmp_path / "journal.json" + path.write_text("garbage", encoding="utf-8") + monkeypatch.setattr(dd, "JOURNAL_PATH", str(path)) + assert dd.load_journal() == [] + + +# ── play_sound ─────────────────────────────────────────────────────── + +def test_play_sound_unknown_kind_is_noop(): + # Unknown kind returns before spawning a thread; must not raise. 
+ dd.play_sound("not-a-real-sound-kind") + + +def test_play_sound_known_kind_spawns_daemon_thread(monkeypatch): + """Known kind spawns a background thread — patched Beep keeps the test silent.""" + calls: list[tuple[int, int]] = [] + + def fake_beep(freq: int, dur: int) -> None: + calls.append((freq, dur)) + + monkeypatch.setattr(dd, "_beep_thread", fake_beep) + dd.play_sound("tick") + # Thread is daemon; join via a tight poll so test stays fast. + import threading + import time + deadline = time.time() + 1.0 + while not calls and time.time() < deadline: + time.sleep(0.01) + assert calls, "expected _beep_thread to be invoked by play_sound" + # The only 'tick' note is (600, 15). + assert calls[0] == (600, 15) + # No leaked non-daemon threads. + for t in threading.enumerate(): + if t is not threading.main_thread(): + assert t.daemon diff --git a/tests/test_ffmpeg_path_validation.py b/tests/test_ffmpeg_path_validation.py new file mode 100644 index 0000000..e11a063 --- /dev/null +++ b/tests/test_ffmpeg_path_validation.py @@ -0,0 +1,101 @@ +"""Tests for _validate_output_dir + L1 path-traversal regression. + +Covers the validator's core rejection rules and runs the L1 PoC as an +end-to-end regression guard. If either the unit tests or the PoC +regression fails, the L1 vulnerability is back. + +Related audit: audits/2026-04-17-security-probe.md §L1. 
+""" + +from __future__ import annotations + +import subprocess +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools.ffmpeg_studio import ( # noqa: E402 + OutputDirError, + _validate_output_dir, +) + + +# ── Validator unit tests ──────────────────────────────────────────── + + +def test_empty_string_rejected(): + with pytest.raises(OutputDirError): + _validate_output_dir("") + + +def test_whitespace_only_rejected(): + with pytest.raises(OutputDirError): + _validate_output_dir(" ") + + +def test_none_rejected(): + with pytest.raises(OutputDirError): + _validate_output_dir(None) + + +def test_backslash_traversal_rejected(): + with pytest.raises(OutputDirError, match=r"'\.\.'"): + _validate_output_dir("..\\..\\..\\Windows\\System32") + + +def test_forward_slash_traversal_rejected(): + with pytest.raises(OutputDirError, match=r"'\.\.'"): + _validate_output_dir("../../Windows/System32") + + +def test_relative_path_rejected(): + with pytest.raises(OutputDirError, match="absolute"): + _validate_output_dir("my_videos") + + +def test_absolute_path_without_traversal_accepted(tmp_path): + result = _validate_output_dir(str(tmp_path)) + assert result == tmp_path.resolve() + + +def test_allowed_bases_accept_inside(tmp_path): + inside = tmp_path / "inside" + inside.mkdir() + result = _validate_output_dir(str(inside), allowed_bases=[tmp_path]) + assert result == inside.resolve() + + +def test_allowed_bases_reject_outside(tmp_path): + base = tmp_path / "legit" + outside = tmp_path / "victim" + base.mkdir() + outside.mkdir() + with pytest.raises(OutputDirError, match="must be inside"): + _validate_output_dir(str(outside), allowed_bases=[base]) + + +# ── L1 PoC regression guard ───────────────────────────────────────── + + +def test_l1_poc_is_blocked(): + """End-to-end: L1 PoC must print EXPLOIT BLOCKED + exit 1 after fix.""" + poc = PROJECT_ROOT / "security" / "poc" / 
"L1_2026-04-17.py" + assert poc.exists(), f"PoC missing at {poc}" + + result = subprocess.run( + [sys.executable, str(poc)], + capture_output=True, + text=True, + timeout=30, + ) + assert "EXPLOIT BLOCKED" in result.stdout, ( + f"L1 PoC did not block:\nstdout={result.stdout}\nstderr={result.stderr}" + ) + assert result.returncode == 1, ( + f"L1 PoC exit code was {result.returncode}, expected 1. " + f"stdout={result.stdout}" + ) diff --git a/tests/test_ffmpeg_screenshot_path_validation.py b/tests/test_ffmpeg_screenshot_path_validation.py new file mode 100644 index 0000000..706fa5e --- /dev/null +++ b/tests/test_ffmpeg_screenshot_path_validation.py @@ -0,0 +1,62 @@ +"""Tests for L2 screenshot-path-traversal regression. + +Validator is shared with L1 (see ``test_ffmpeg_path_validation.py`` for +the full validator unit-test matrix). This file focuses on the L2 PoC +regression + L2-specific payloads from the audit. + +Related audit: audits/2026-04-17-security-probe.md §L2. +""" + +from __future__ import annotations + +import subprocess +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools.ffmpeg_studio import ( # noqa: E402 + OutputDirError, + _validate_output_dir, +) + + +def test_l2_documented_payload_rejected(): + """The exact '../../Windows/Temp' string from the audit must be rejected.""" + with pytest.raises(OutputDirError): + _validate_output_dir("../../Windows/Temp") + + +def test_l2_sandbox_containment(tmp_path): + """Sibling-of-base traversal (same shape as the PoC) must be rejected.""" + sandbox = tmp_path / "sandbox_pictures" + outside = tmp_path / "outside_victim" + sandbox.mkdir() + outside.mkdir() + + traversal = f"{sandbox}{chr(92)}..{chr(92)}outside_victim" + with pytest.raises(OutputDirError): + _validate_output_dir(traversal, allowed_bases=[sandbox]) + + +def test_l2_poc_is_blocked(): + """End-to-end: L2 PoC must print EXPLOIT BLOCKED + exit 1 
after fix.""" + poc = PROJECT_ROOT / "security" / "poc" / "L2_2026-04-17.py" + assert poc.exists(), f"PoC missing at {poc}" + + result = subprocess.run( + [sys.executable, str(poc)], + capture_output=True, + text=True, + timeout=30, + ) + assert "EXPLOIT BLOCKED" in result.stdout, ( + f"L2 PoC did not block:\nstdout={result.stdout}\nstderr={result.stderr}" + ) + assert result.returncode == 1, ( + f"L2 PoC exit code was {result.returncode}, expected 1. " + f"stdout={result.stdout}" + ) diff --git a/tests/test_ffmpeg_studio.py b/tests/test_ffmpeg_studio.py new file mode 100644 index 0000000..f63e6d4 --- /dev/null +++ b/tests/test_ffmpeg_studio.py @@ -0,0 +1,238 @@ +"""Tests for tools/ffmpeg_studio.py — pure encoder/label helpers (Plan B / B4). + +Covers the subprocess-free helpers: + * _crf_label: CRF → quality-band label (incl. out-of-range fallback). + * _guess_ratio: common aspect-ratio detection + h=0 guard + custom fallback. + * _gpu_key_from_label / _codec_key_from_label / _chroma_depth_keys. + * _pix_fmt: chroma × depth × GPU dispatch incl. NVENC 444+10-bit quirk. + * _check_compat_warning: AMD AMF 4:4:4 unsupported table. + * _build_video_codec_args: encoder selection + preset + pix-fmt tail. + +Skipped here (need subprocess/FFmpeg mocking — out of scope for B4 smoke): + _test_encoder, _probe_hardware, _detect_audio_devices. 
+""" + +from __future__ import annotations + +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools import ffmpeg_studio as fs # noqa: E402 + + +# ── _crf_label ─────────────────────────────────────────────────────── + +@pytest.mark.parametrize("val,expected", [ + (0, "Near-Lossless (huge files)"), + (10, "Near-Lossless (huge files)"), + (11, "Excellent"), + (18, "Excellent"), + (19, "Good \u2190 sweet spot"), + (23, "Good \u2190 sweet spot"), + (26, "Balanced"), + (33, "Balanced"), + (34, "Small File"), + (42, "Small File"), + (43, "Low Quality"), + (51, "Low Quality"), +]) +def test_crf_label_bucket_boundaries(val, expected): + assert fs._crf_label(val) == expected + + +def test_crf_label_out_of_range_returns_empty(): + assert fs._crf_label(-1) == "" + assert fs._crf_label(52) == "" + assert fs._crf_label(999) == "" + + +# ── _guess_ratio ───────────────────────────────────────────────────── + +def test_guess_ratio_zero_height_is_safe(): + assert fs._guess_ratio(1920, 0) == "?" + + +@pytest.mark.parametrize("w,h,expected", [ + (1920, 1080, "16:9"), + (3840, 2160, "16:9"), + (1080, 1920, "9:16"), + (1024, 768, "4:3"), + (600, 800, "3:4"), + (1000, 1000, "1:1"), + (2560, 1080, "21:9"), + (1080, 1350, "4:5"), + (1500, 1200, "5:4"), + (1500, 1000, "3:2"), + (1000, 1500, "2:3"), +]) +def test_guess_ratio_known_ratios(w, h, expected): + assert fs._guess_ratio(w, h) == expected + + +def test_guess_ratio_uncommon_falls_back_to_custom(): + # 1234×456 ≈ 2.706 — not within 0.04 of any listed ratio. 
+ assert fs._guess_ratio(1234, 456) == "custom" + + +# ── _gpu_key_from_label ────────────────────────────────────────────── + +@pytest.mark.parametrize("label,expected", [ + ("NVIDIA GPU (NVENC) — H.264 + H.265", "nvidia"), + ("AMD GPU (AMF) — H.264 + H.265", "amd"), + ("Intel GPU (QSV)", "intel"), + ("CPU (software) — most compatible", "cpu"), + ("Some unknown label", "cpu"), +]) +def test_gpu_key_from_label(label, expected): + assert fs._gpu_key_from_label(label) == expected + + +# ── _codec_key_from_label ──────────────────────────────────────────── + +@pytest.mark.parametrize("label,expected", [ + ("H.264 (most compatible)", "h264"), + ("H.265 / HEVC (better quality, smaller files)", "h265"), + ("AV1 (newest, smallest)", "av1"), + ("Anything else", "h264"), +]) +def test_codec_key_from_label(label, expected): + assert fs._codec_key_from_label(label) == expected + + +# ── _chroma_depth_keys ─────────────────────────────────────────────── + +@pytest.mark.parametrize("chroma_label,depth_label,expected", [ + ("4:2:0 (standard — widest compatibility)", "8-bit", ("420", "8")), + ("4:2:2", "10-bit", ("422", "10")), + ("4:4:4", "8-bit", ("444", "8")), + ("4:4:4", "10-bit", ("444", "10")), + # Numeric-only shorthand also recognized. + ("422 sampling", "8", ("422", "8")), +]) +def test_chroma_depth_keys(chroma_label, depth_label, expected): + assert fs._chroma_depth_keys(chroma_label, depth_label) == expected + + +# ── _pix_fmt ───────────────────────────────────────────────────────── + +def test_pix_fmt_420_8bit_all_gpus(): + for gpu in ("nvidia", "amd", "intel", "cpu"): + assert fs._pix_fmt("4:2:0", "8-bit", gpu) == "yuv420p" + + +def test_pix_fmt_420_10bit_gpus_use_p010(): + assert fs._pix_fmt("4:2:0", "10-bit", "nvidia") == "p010le" + assert fs._pix_fmt("4:2:0", "10-bit", "amd") == "p010le" + assert fs._pix_fmt("4:2:0", "10-bit", "intel") == "p010le" + # CPU uses the sw fmt. 
+ assert fs._pix_fmt("4:2:0", "10-bit", "cpu") == "yuv420p10le" + + +def test_pix_fmt_444_10bit_nvenc_uses_16le_quirk(): + """NVENC HEVC 444+10-bit requires yuv444p16le, not yuv444p10le.""" + assert fs._pix_fmt("4:4:4", "10-bit", "nvidia") == "yuv444p16le" + # AMD AMF falls back to p010le for 444+10-bit per the map. + assert fs._pix_fmt("4:4:4", "10-bit", "amd") == "p010le" + assert fs._pix_fmt("4:4:4", "10-bit", "cpu") == "yuv444p10le" + + +def test_pix_fmt_unknown_combo_falls_back_to_yuv420p(): + # Unknown depth → tuple defaults to ("yuv420p",)*4. + assert fs._pix_fmt("4:2:0", "12-bit", "nvidia") == "yuv420p" + + +# ── _check_compat_warning ──────────────────────────────────────────── + +def test_check_compat_warning_amd_amf_444_rejected(): + warn = fs._check_compat_warning("amd", "h264", "4:4:4", "8-bit") + assert warn is not None + assert "4:4:4" in warn + + +def test_check_compat_warning_known_supported_returns_none(): + assert fs._check_compat_warning("nvidia", "h264", "4:2:0", "8-bit") is None + assert fs._check_compat_warning("cpu", "h265", "4:4:4", "10-bit") is None + + +# ── _build_video_codec_args ────────────────────────────────────────── + +def test_build_cpu_h264_uses_libx264_with_crf_and_preset(): + args = fs._build_video_codec_args( + codec_key="h264", gpu_key="cpu", crf=23, + speed_label="Fast", fmt="yuv420p", + ) + assert args[:2] == ["-vcodec", "libx264"] + assert "-crf" in args and args[args.index("-crf") + 1] == "23" + assert "-preset" in args and args[args.index("-preset") + 1] == "fast" + assert args[-2:] == ["-pix_fmt", "yuv420p"] + + +def test_build_cpu_av1_uses_libsvtav1_crf_only_no_preset(): + args = fs._build_video_codec_args( + codec_key="av1", gpu_key="cpu", crf=30, + speed_label="Medium", fmt="yuv420p", + ) + assert args[:2] == ["-vcodec", "libsvtav1"] + assert "-crf" in args + # AV1 SVT path explicitly skips -preset. 
+ assert "-preset" not in args + + +def test_build_nvidia_h264_uses_vbr_cq_and_nvenc_preset(): + args = fs._build_video_codec_args( + codec_key="h264", gpu_key="nvidia", crf=20, + speed_label="Ultrafast", fmt="yuv420p", + ) + assert args[:2] == ["-vcodec", "h264_nvenc"] + assert "-rc:v" in args and args[args.index("-rc:v") + 1] == "vbr" + assert "-cq:v" in args and args[args.index("-cq:v") + 1] == "20" + # Ultrafast → NVENC preset p1. + assert "-preset" in args and args[args.index("-preset") + 1] == "p1" + + +def test_build_amd_clamps_qp_to_0_51_and_uses_amf_quality(): + args = fs._build_video_codec_args( + codec_key="h265", gpu_key="amd", crf=999, # must clamp to 51 + speed_label="Medium", fmt="p010le", + ) + assert args[:2] == ["-vcodec", "hevc_amf"] + assert "-quality" in args and args[args.index("-quality") + 1] == "quality" + assert args[args.index("-qp_i") + 1] == "51" + assert args[args.index("-qp_p") + 1] == "51" + + +def test_build_intel_qsv_uses_global_quality(): + args = fs._build_video_codec_args( + codec_key="h264", gpu_key="intel", crf=25, + speed_label="Fast", fmt="yuv420p", + ) + assert args[:2] == ["-vcodec", "h264_qsv"] + assert args[args.index("-global_quality") + 1] == "25" + # Fast → QSV preset 'fast'. + assert args[args.index("-preset") + 1] == "fast" + + +def test_build_unknown_speed_falls_back_to_ultrafast_family(): + args = fs._build_video_codec_args( + codec_key="h264", gpu_key="cpu", crf=23, + speed_label="Turbo-Mystery", fmt="yuv420p", + ) + # Unknown speed → SPEED_OPTIONS["Ultrafast"] → libx264 preset 'ultrafast'. 
+ assert args[args.index("-preset") + 1] == "ultrafast" + + +def test_build_unknown_gpu_falls_back_to_cpu_encoder(): + """ENCODER_MAP.get(codec_key, ...).get(gpu_key, cpu_encoder) fallback.""" + args = fs._build_video_codec_args( + codec_key="h264", gpu_key="unknown-gpu", crf=23, + speed_label="Fast", fmt="yuv420p", + ) + # Unknown gpu falls through to the final 'else' (qsv branch in the + # current implementation), but the encoder lookup returns libx264. + assert args[1] == "libx264" + assert args[-2:] == ["-pix_fmt", "yuv420p"] diff --git a/tests/test_folder_size_analyzer.py b/tests/test_folder_size_analyzer.py new file mode 100644 index 0000000..34618ad --- /dev/null +++ b/tests/test_folder_size_analyzer.py @@ -0,0 +1,162 @@ +"""Tests for tools/folder_size_analyzer.py — utilities + data model (Plan B / B4). + +Covers: + * format_size across the 0 B / B / KB / MB / GB / TB boundaries. + * format_date on the zero-timestamp sentinel and a known epoch. + * get_folder_size on a nested tmp tree: totals, file count, hidden-dir + skip, permission-error tolerance, and progress callback cadence. + * get_drive_info on a valid path + graceful fallback on a bogus path. + * FolderInfo defaults + documented always-True __lt__ quirk. + +No Tk/CTk instantiation — pure helpers only. 
+""" + +from __future__ import annotations + +import sys +from datetime import datetime +from pathlib import Path +from typing import List + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools import folder_size_analyzer as fsa # noqa: E402 + + +# ── format_size ────────────────────────────────────────────────────── + +def test_format_size_zero(): + assert fsa.format_size(0) == "0 B" + + +def test_format_size_bytes_uses_integer_suffix(): + assert fsa.format_size(512) == "512 B" + assert fsa.format_size(1023) == "1023 B" + + +def test_format_size_kb_mb_gb_tb_boundaries(): + assert fsa.format_size(1024) == "1.0 KB" + assert fsa.format_size(1024 * 1024) == "1.0 MB" + assert fsa.format_size(1024 ** 3) == "1.0 GB" + assert fsa.format_size(1024 ** 4) == "1.0 TB" + + +def test_format_size_mid_unit_rounds_to_one_decimal(): + assert fsa.format_size(1_536) == "1.5 KB" # 1.5 KB exact + assert fsa.format_size(2_500_000) == "2.4 MB" # rounds down via %.1f + + +# ── format_date ────────────────────────────────────────────────────── + +def test_format_date_zero_returns_unknown(): + assert fsa.format_date(0) == "Unknown" + + +def test_format_date_formats_to_minute_resolution(): + # Use a fixed local-time instant; compare against datetime for tz-safety. + ts = datetime(2026, 4, 17, 9, 30, 45).timestamp() + assert fsa.format_date(ts) == "2026-04-17 09:30" + + +# ── get_folder_size ────────────────────────────────────────────────── + +def _populate(root: Path) -> tuple[int, int]: + """Create 3 files summing to 600 bytes, plus a hidden subdir that must + be skipped. 
Returns (expected_total_size, expected_file_count).""" + (root / "a.txt").write_bytes(b"a" * 100) + sub = root / "sub" + sub.mkdir() + (sub / "b.bin").write_bytes(b"b" * 200) + (sub / "c.bin").write_bytes(b"c" * 300) + + hidden = root / ".cache" + hidden.mkdir() + (hidden / "ignored.bin").write_bytes(b"x" * 9999) + return (100 + 200 + 300, 3) + + +def test_get_folder_size_counts_files_and_sums_bytes(tmp_path): + expected_total, expected_files = _populate(tmp_path) + total, files, ctime, mtime, atime = fsa.get_folder_size(str(tmp_path)) + assert total == expected_total + assert files == expected_files + # All timestamps are populated (non-zero since files were just written). + assert ctime > 0 + assert mtime > 0 + assert atime > 0 + + +def test_get_folder_size_skips_hidden_dirs(tmp_path): + # Without _populate: only the hidden file under .cache exists. + hidden = tmp_path / ".cache" + hidden.mkdir() + (hidden / "ignored.bin").write_bytes(b"x" * 500) + + total, files, *_ = fsa.get_folder_size(str(tmp_path)) + assert total == 0 + assert files == 0 + + +def test_get_folder_size_on_empty_dir_returns_zero_ctime(tmp_path): + total, files, ctime, mtime, atime = fsa.get_folder_size(str(tmp_path)) + assert total == 0 + assert files == 0 + # With no files, the inf sentinel must be normalized to 0. + assert ctime == 0 + assert mtime == 0 + assert atime == 0 + + +def test_get_folder_size_invokes_progress_callback_every_100_files(tmp_path): + # Create 250 tiny files so the callback should fire at 100 and 200. 
+ for i in range(250): + (tmp_path / f"f_{i:04d}.bin").write_bytes(b"0") + + ticks: List[int] = [] + total, files, *_ = fsa.get_folder_size(str(tmp_path), progress_callback=ticks.append) + + assert files == 250 + assert total == 250 + assert ticks == [100, 200] + + +# ── get_drive_info ─────────────────────────────────────────────────── + +def test_get_drive_info_returns_positive_totals_for_real_path(tmp_path): + info = fsa.get_drive_info(str(tmp_path)) + assert set(info.keys()) == {"total", "used", "free"} + assert info["total"] > 0 + assert info["used"] >= 0 + assert info["free"] >= 0 + + +def test_get_drive_info_returns_zeros_for_bogus_path(): + info = fsa.get_drive_info("Z:\\definitely-not-a-real-drive-xyzzy") + assert info == {"total": 0, "used": 0, "free": 0} + + +# ── FolderInfo ─────────────────────────────────────────────────────── + +def test_folder_info_defaults(tmp_path): + info = fsa.FolderInfo(str(tmp_path)) + assert info.path == str(tmp_path) + assert info.name == tmp_path.name + assert info.size == 0 + assert info.file_count == 0 + assert info.created_time == 0 + assert info.modified_time == 0 + assert info.accessed_time == 0 + assert info.size_percentage == 0.0 + + +def test_folder_info_lt_always_true_is_overridden_by_sort_key(): + """__lt__ returns True unconditionally; sorting relies on explicit keys.""" + a = fsa.FolderInfo("a") + b = fsa.FolderInfo("b") + # Documents the current contract — if sort() is ever called without a key, + # results will not be meaningful. + assert (a < b) is True + assert (b < a) is True diff --git a/tests/test_network_pattern_analyzer.py b/tests/test_network_pattern_analyzer.py new file mode 100644 index 0000000..0e566a9 --- /dev/null +++ b/tests/test_network_pattern_analyzer.py @@ -0,0 +1,217 @@ +"""Tests for tools/network_pattern_analyzer.py (Plan B / B4). + +Covers pure-logic methods on NetworkPatternAnalyzer without instantiating +its Tk root. 
Bypass __init__ via __new__ and drive the analyzers directly +on in-memory fixtures. + +Covered: + * _parse_timestamp: valid, empty, garbage. + * _calculate_duration_seconds: valid pair, either side invalid. + * _get_date_range: empty, single-day, multi-day, all-invalid. + * _analyze_summary / _analyze_time_distribution / + _analyze_category_frequency / _analyze_average_duration / + _analyze_repeating_time_windows / _analyze_sequential_correlations. +""" + +from __future__ import annotations + +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools.network_pattern_analyzer import NetworkPatternAnalyzer # noqa: E402 + + +# ── Fixtures ───────────────────────────────────────────────────────── + +@pytest.fixture +def npa() -> NetworkPatternAnalyzer: + """Bare analyzer with no Tk root — only pure helpers are exercised.""" + return NetworkPatternAnalyzer.__new__(NetworkPatternAnalyzer) + + +def _inc(start: str, end: str = "", category: str = "TIMEOUT") -> dict: + return {"start_time": start, "end_time": end, "category": category} + + +# ── Timestamp parsing ──────────────────────────────────────────────── + +def test_parse_timestamp_valid(npa): + dt = npa._parse_timestamp("2026-04-17 09:30:45") + assert dt is not None + assert (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) == ( + 2026, 4, 17, 9, 30, 45, + ) + + +def test_parse_timestamp_empty_and_garbage_return_none(npa): + assert npa._parse_timestamp("") is None + assert npa._parse_timestamp("not-a-timestamp") is None + # Missing seconds — strict format rejects. 
+ assert npa._parse_timestamp("2026-04-17 09:30") is None + + +def test_calculate_duration_seconds_valid(npa): + seconds = npa._calculate_duration_seconds( + "2026-04-17 09:00:00", "2026-04-17 09:05:30", + ) + assert seconds == 330.0 + + +def test_calculate_duration_seconds_invalid_returns_none(npa): + assert npa._calculate_duration_seconds("", "2026-04-17 09:00:00") is None + assert npa._calculate_duration_seconds("2026-04-17 09:00:00", "") is None + assert npa._calculate_duration_seconds("", "") is None + + +# ── Date range ─────────────────────────────────────────────────────── + +def test_get_date_range_empty(npa): + assert npa._get_date_range([]) == "No data" + + +def test_get_date_range_all_invalid_returns_sentinel(npa): + incidents = [_inc(""), _inc("garbage")] + assert npa._get_date_range(incidents) == "No valid timestamps" + + +def test_get_date_range_single_day(npa): + incidents = [ + _inc("2026-04-17 09:00:00"), + _inc("2026-04-17 18:30:00"), + ] + assert npa._get_date_range(incidents) == "2026-04-17" + + +def test_get_date_range_multi_day(npa): + incidents = [ + _inc("2026-04-17 09:00:00"), + _inc("2026-04-19 10:00:00"), + _inc("2026-04-18 11:00:00"), + ] + assert npa._get_date_range(incidents) == "2026-04-17 to 2026-04-19" + + +# ── Summary ────────────────────────────────────────────────────────── + +def test_analyze_summary_shape_and_counts(npa): + incidents = [ + _inc("2026-04-17 09:00:00", category="TIMEOUT"), + _inc("2026-04-17 10:00:00", category="DNS_FAIL"), + _inc("2026-04-17 11:00:00", category="TIMEOUT"), + ] + events = [{"ts": "x"}, {"ts": "y"}] + summary = npa._analyze_summary(incidents, events) + assert summary["total_incidents"] == 3 + assert summary["total_events"] == 2 + assert summary["unique_categories"] == 2 + assert summary["date_range"] == "2026-04-17" + + +def test_analyze_summary_treats_missing_category_as_unknown(npa): + incidents = [{"start_time": "2026-04-17 09:00:00"}] + summary = npa._analyze_summary(incidents, []) + assert 
summary["unique_categories"] == 1 # 'UNKNOWN' + + +# ── Time distribution ──────────────────────────────────────────────── + +def test_analyze_time_distribution_returns_24_buckets(npa): + dist = npa._analyze_time_distribution([]) + assert len(dist) == 24 + assert all(v == 0 for v in dist.values()) + assert set(dist.keys()) == {f"{h:02d}" for h in range(24)} + + +def test_analyze_time_distribution_counts_by_hour(npa): + incidents = [ + _inc("2026-04-17 09:05:00"), + _inc("2026-04-17 09:55:00"), + _inc("2026-04-17 14:00:00"), + _inc("bad-timestamp"), # ignored + ] + dist = npa._analyze_time_distribution(incidents) + assert dist["09"] == 2 + assert dist["14"] == 1 + assert dist["00"] == 0 + + +# ── Category frequency ────────────────────────────────────────────── + +def test_analyze_category_frequency(npa): + incidents = [ + _inc("2026-04-17 09:00:00", category="TIMEOUT"), + _inc("2026-04-17 10:00:00", category="TIMEOUT"), + _inc("2026-04-17 11:00:00", category="DNS_FAIL"), + {"start_time": "2026-04-17 12:00:00"}, # UNKNOWN + ] + freq = npa._analyze_category_frequency(incidents) + assert freq == {"TIMEOUT": 2, "DNS_FAIL": 1, "UNKNOWN": 1} + + +# ── Average duration ───────────────────────────────────────────────── + +def test_analyze_average_duration_averages_per_category(npa): + incidents = [ + {"start_time": "2026-04-17 09:00:00", "end_time": "2026-04-17 09:01:00", + "category": "TIMEOUT"}, # 60s + {"start_time": "2026-04-17 10:00:00", "end_time": "2026-04-17 10:02:00", + "category": "TIMEOUT"}, # 120s → avg 90 + {"start_time": "2026-04-17 11:00:00", "end_time": "2026-04-17 11:05:00", + "category": "DNS_FAIL"}, # 300s + {"start_time": "", "end_time": "", "category": "NOISE"}, # skipped + ] + avg = npa._analyze_average_duration(incidents) + assert avg["TIMEOUT"] == 90.0 + assert avg["DNS_FAIL"] == 300.0 + assert "NOISE" not in avg + + +# ── Repeating time windows ────────────────────────────────────────── + +def 
test_analyze_repeating_time_windows_flags_above_average_hours(npa): + incidents = ( + [_inc("2026-04-17 09:10:00")] * 5 # hour 09 → 5 + + [_inc("2026-04-17 14:00:00")] * 3 # hour 14 → 3 + + [_inc("2026-04-17 22:00:00")] # hour 22 → 1 + ) + # Average = (5+3+1)/3 = 3.0 → only hour 09 is strictly above average. + result = npa._analyze_repeating_time_windows(incidents) + assert len(result) == 1 + assert result[0]["hour"] == 9 + assert result[0]["incidents"] == 5 + assert result[0]["above_average"] is True + + +def test_analyze_repeating_time_windows_empty_when_no_incidents(npa): + assert npa._analyze_repeating_time_windows([]) == [] + + +# ── Sequential correlations ───────────────────────────────────────── + +def test_analyze_sequential_correlations_detects_sub10min_pairs(npa): + incidents = [ + _inc("2026-04-17 09:00:00", category="DNS_FAIL"), + _inc("2026-04-17 09:05:00", category="TIMEOUT"), # 5min after — pair + _inc("2026-04-17 09:12:00", category="DNS_FAIL"), # 7min after — pair + _inc("2026-04-17 10:30:00", category="TIMEOUT"), # 78min — skipped + ] + result = npa._analyze_sequential_correlations(incidents) + patterns = {item["pattern"]: item for item in result} + + assert "DNS_FAIL → TIMEOUT" in patterns + assert "TIMEOUT → DNS_FAIL" in patterns + assert patterns["DNS_FAIL → TIMEOUT"]["occurrences"] == 1 + assert patterns["TIMEOUT → DNS_FAIL"]["occurrences"] == 1 + # Two pairs total → each is 50%. 
+ assert patterns["DNS_FAIL → TIMEOUT"]["percentage"] == 50.0 + assert patterns["TIMEOUT → DNS_FAIL"]["percentage"] == 50.0 + + +def test_analyze_sequential_correlations_empty_input(npa): + assert npa._analyze_sequential_correlations([]) == [] + assert npa._analyze_sequential_correlations([_inc("2026-04-17 09:00:00")]) == [] diff --git a/tests/test_no_portable_source_drift.py b/tests/test_no_portable_source_drift.py new file mode 100644 index 0000000..b7a1155 --- /dev/null +++ b/tests/test_no_portable_source_drift.py @@ -0,0 +1,24 @@ +"""Drift guard for portable/ — prevents re-introduction of shadow copies. + +Post-A7, portable/ must contain only *_Portable.py launchers + support +files (.bat, dist/, build/). Any bare `.py` source in portable/ other +than the launchers is a duplicate of a tools/* module and will silently +diverge — exactly the failure mode A7 fixed. +""" + +from pathlib import Path + +PORTABLE_DIR = Path(__file__).resolve().parents[1] / "portable" + + +def test_no_duplicate_tool_sources_in_portable(): + """No flat .py in portable/ other than the _Portable.py launchers.""" + assert PORTABLE_DIR.is_dir(), f"portable/ missing: {PORTABLE_DIR}" + bad = [ + p.name for p in PORTABLE_DIR.glob("*.py") + if not p.name.endswith("_Portable.py") + ] + assert not bad, ( + f"portable/ must not contain tool source duplicates: {bad}. " + "Launchers import from tools.* — do not copy tool sources here." + ) diff --git a/tests/test_screen_lock.py b/tests/test_screen_lock.py index dae994f..f39c1bf 100644 --- a/tests/test_screen_lock.py +++ b/tests/test_screen_lock.py @@ -222,13 +222,14 @@ def test_install_hook_fails_fast_if_critical_combo_unsupported(self, monkeypatch import importlib import tkinter as tk - # Create a mock keyboard module where hook() succeeds but block_key("alt+tab") fails + # Create a mock keyboard module where hook() succeeds but block_key("left windows") fails. + # "left windows" is the first entry in the new _CRITICAL_COMBOS tuple. 
mock_hook = object() blocked_id_counter = [0] unhook_called = [] def mock_block_key(combo): - if combo == "alt+tab": + if combo == "left windows": raise ValueError(f"Unsupported key name: {combo}") blocked_id_counter[0] += 1 return blocked_id_counter[0] @@ -283,7 +284,7 @@ def mock_showerror(title, message): # Verify error messagebox was shown assert len(showerror_calls) == 1 assert showerror_calls[0][0] == "Screen Lock" - assert "alt+tab" in showerror_calls[0][1] + assert "left windows" in showerror_calls[0][1] finally: root.destroy() finally: @@ -306,9 +307,10 @@ def test_install_hook_rollback_unblocks_on_critical_failure(self, monkeypatch, a unhook_calls = [] def mock_block_key(combo): - # First 2 combos (alt+tab, alt+f4) succeed, 3rd (ctrl+shift+esc) fails + # 1st critical combo (left windows) succeeds, 2nd (right windows) fails. + # Tests the rollback path: previously-blocked IDs must be unblocked. blocked_counter[0] += 1 - if blocked_counter[0] <= 2: + if blocked_counter[0] == 1: return blocked_counter[0] # return unique block ID raise ValueError(f"Unsupported key name: {combo}") @@ -362,18 +364,18 @@ def mock_showerror(title, message): # Verify it failed assert result is False - # Verify unblock_key was called for each of the 2 successful blocks - assert len(unblock_calls) == 2 - assert set(unblock_calls) == {1, 2} + # Verify unblock_key was called for the 1 successful block (left windows) + assert len(unblock_calls) == 1 + assert set(unblock_calls) == {1} # Verify unhook was called once assert len(unhook_calls) == 1 assert unhook_calls[0] is mock_hook - # Verify error messagebox was shown + # Verify error messagebox was shown for the failing 2nd critical combo assert len(showerror_calls) == 1 assert showerror_calls[0][0] == "Screen Lock" - assert "ctrl+shift+esc" in showerror_calls[0][1] + assert "right windows" in showerror_calls[0][1] finally: root.destroy() finally: diff --git a/tests/test_security_audit.py b/tests/test_security_audit.py new file mode 
100644 index 0000000..b67ab3d --- /dev/null +++ b/tests/test_security_audit.py @@ -0,0 +1,175 @@ +"""Tests for tools/security_audit.py — helpers + Finding + Engine (Plan B / B4). + +Covers: + * now_ts format, _age_days on tmp file + missing-path sentinel. + * _is_suspicious_path: empty, SAFE_PROCESS_PATHS, suspicious buckets. + * Finding.key() determinism + dedup across identical instances. + * SecurityAuditEngine: load/save state, baseline lifecycle, get_checks. + +check_* methods hit the real registry/psutil/subprocess and are out of +scope for smoke tests — they need a full mock harness (Plan A/B8 work). +""" + +from __future__ import annotations + +import json +import re +import sys +import time +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(PROJECT_ROOT)) + +from tools import security_audit as sa # noqa: E402 +from tools.security_audit import Finding, SecurityAuditEngine # noqa: E402 + + +# ── now_ts ─────────────────────────────────────────────────────────── + +def test_now_ts_matches_iso_minute_shape(): + """Format is YYYY-MM-DD HH:MM:SS (local time).""" + assert re.match(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$", sa.now_ts()) + + +# ── _age_days ──────────────────────────────────────────────────────── + +def test_age_days_recent_file_is_near_zero(tmp_path): + f = tmp_path / "fresh.txt" + f.write_text("x") + # Newly-written file: age is well under a day. + # abs() tolerates Windows clock jitter that can produce tiny + # negative values (~1e-12) when reads outpace the timer tick. 
+ assert abs(sa._age_days(str(f))) < 0.01 + + +def test_age_days_missing_path_returns_sentinel(tmp_path): + assert sa._age_days(str(tmp_path / "does-not-exist")) == 9999 + + +# ── _is_suspicious_path ────────────────────────────────────────────── + +def test_is_suspicious_path_empty_returns_false(): + assert sa._is_suspicious_path("") is False + + +def test_is_suspicious_path_safe_system_paths(): + assert sa._is_suspicious_path(r"C:\Windows\System32\svchost.exe") is False + assert sa._is_suspicious_path(r"C:\Program Files\Foo\foo.exe") is False + assert sa._is_suspicious_path(r"C:\Program Files (x86)\Bar\bar.exe") is False + + +def test_is_suspicious_path_flags_temp_and_downloads(): + assert sa._is_suspicious_path(r"C:\Users\foo\AppData\Local\Temp\weird.exe") is True + assert sa._is_suspicious_path(r"C:\Users\foo\Downloads\installer.exe") is True + assert sa._is_suspicious_path(r"C:\Temp\payload.exe") is True + assert sa._is_suspicious_path(r"C:\Users\Public\dropped.exe") is True + + +def test_is_suspicious_path_safe_override_wins_over_suspicious(): + """SAFE_PROCESS_PATHS short-circuits before the suspicious check.""" + # System32 path that also contains 'temp' — safe list must win. + assert sa._is_suspicious_path( + r"C:\Windows\System32\TempSomething\thing.exe" + ) is False + + +# ── Finding.key ────────────────────────────────────────────────────── + +def test_finding_key_is_stable_for_identical_fields(): + a = Finding("startup", "CRITICAL", "t", "detail") + b = Finding("startup", "CRITICAL", "t", "detail") + assert a.key() == b.key() + # Key is a 16-char hex digest prefix. 
+ assert re.match(r"^[0-9a-f]{16}$", a.key()) + + +def test_finding_key_changes_with_category_title_or_detail_head(): + base = Finding("startup", "INFO", "title", "detail") + assert base.key() != Finding("processes", "INFO", "title", "detail").key() + assert base.key() != Finding("startup", "INFO", "other", "detail").key() + assert base.key() != Finding("startup", "INFO", "title", "different").key() + + +def test_finding_key_ignores_detail_beyond_first_120_chars(): + shared_prefix = "x" * 120 + a = Finding("c", "INFO", "t", shared_prefix + "AAAAA") + b = Finding("c", "INFO", "t", shared_prefix + "BBBBB") + assert a.key() == b.key() + + +def test_finding_key_unaffected_by_severity_or_remediation(): + """key() is a dedup token — not a full fingerprint.""" + a = Finding("c", "INFO", "t", "d", remediation="one") + b = Finding("c", "CRITICAL", "t", "d", remediation="two") + assert a.key() == b.key() + + +# ── SecurityAuditEngine state persistence ─────────────────────────── + +def test_engine_loads_default_state_when_file_missing(tmp_path): + engine = SecurityAuditEngine(str(tmp_path / "missing.json")) + assert engine.state == {"baseline": None, "last_scan": None, "last_findings": []} + + +def test_engine_loads_default_state_on_corrupt_json(tmp_path): + state_path = tmp_path / "state.json" + state_path.write_text("{not json", encoding="utf-8") + engine = SecurityAuditEngine(str(state_path)) + assert engine.state["baseline"] is None + assert engine.state["last_findings"] == [] + + +def test_save_and_reload_state_roundtrip(tmp_path): + state_path = tmp_path / "state.json" + engine = SecurityAuditEngine(str(state_path)) + engine.state["last_scan"] = "2026-04-17 09:00:00" + engine.save_state() + + reopened = SecurityAuditEngine(str(state_path)) + assert reopened.state["last_scan"] == "2026-04-17 09:00:00" + + +# ── Baseline lifecycle ─────────────────────────────────────────────── + +def test_save_baseline_stores_keys_and_clears(tmp_path): + engine = 
SecurityAuditEngine(str(tmp_path / "state.json")) + findings = [ + Finding("startup", "CRITICAL", "A", "detail-a"), + Finding("processes", "WARN", "B", "detail-b"), + ] + engine.save_baseline(findings) + + keys = engine.get_baseline_keys() + assert keys == {f.key() for f in findings} + # Persisted shape matches the expected schema. + on_disk = json.loads((tmp_path / "state.json").read_text(encoding="utf-8")) + assert on_disk["baseline"]["finding_keys"] == [f.key() for f in findings] + assert "saved_at" in on_disk["baseline"] + + engine.clear_baseline() + assert engine.get_baseline_keys() == set() + + +def test_get_baseline_keys_returns_empty_set_when_no_baseline(tmp_path): + engine = SecurityAuditEngine(str(tmp_path / "state.json")) + assert engine.get_baseline_keys() == set() + + +# ── Engine wiring ──────────────────────────────────────────────────── + +def test_get_checks_returns_all_10_categories(tmp_path): + engine = SecurityAuditEngine(str(tmp_path / "state.json")) + checks = engine.get_checks() + assert len(checks) == 10 + names = [name for name, _fn in checks] + assert names == [ + "startup", "processes", "ports", "filesystem", "dns", + "accounts", "wifi", "usb", "browser", "eventlogs", + ] + # All check callables are bound methods on the engine. + for _name, fn in checks: + assert callable(fn) + assert getattr(fn, "__self__", None) is engine diff --git a/tests/test_system_cleaner.py b/tests/test_system_cleaner.py index 9af1996..7fd815e 100644 --- a/tests/test_system_cleaner.py +++ b/tests/test_system_cleaner.py @@ -2,7 +2,10 @@ Covers: * DRY_RUN does not touch disk. - * Symlink entries are skipped, not followed. + * Symlink entries at the top level are skipped, not followed. + * Directory entries whose tree contains a nested symlink are + refused entirely (stricter than rmtree's built-in stop-at-link). + * Successful deletions emit a structured log line per outcome. 
* Missing %TEMP% env var resolves to a non-existent path that ``_delete_dir_contents`` handles gracefully (no crash, no deletes). @@ -10,6 +13,7 @@ """ from __future__ import annotations +import json import os import sys from pathlib import Path @@ -23,6 +27,7 @@ DeleteMode, _delete_dir_contents, _delete_glob_files, + _tree_has_symlink, main, ) @@ -153,3 +158,92 @@ def test_permanent_without_categories_exits_2(capsys): assert exit_code == 2 _, stderr = capsys.readouterr() assert "requires --categories" in stderr + + +# ---------- (e) nested-symlink refusal ---------- + +def test_nested_symlink_refuses_whole_directory(tmp_path, capsys): + """A dir entry whose tree contains a symlink must be refused entirely. + + Stricter than rmtree's "stop at the link" — we refuse the whole + directory and emit a ``symlink_in_tree_refused`` structured log so + ops can audit the refusal. + """ + outside = tmp_path / "outside" + outside.mkdir() + protected = outside / "precious.txt" + protected.write_text("do-not-delete", encoding="utf-8") + + target = tmp_path / "target" + target.mkdir() + # Directory entry with a NESTED symlink (not the top entry itself) + nested = target / "nested" + nested.mkdir() + (nested / "plain.txt").write_text("would-be-deleted", encoding="utf-8") + link = nested / "link_inside" + try: + link.symlink_to(outside, target_is_directory=True) + except (OSError, NotImplementedError) as exc: + pytest.skip(f"symlink creation not permitted in this env: {exc}") + + freed = _delete_dir_contents(str(target), _silent_log, DeleteMode.PERMANENT) + + # The whole nested dir was refused -> nothing inside it was deleted. 
+ assert nested.is_dir(), "refused directory must remain" + assert (nested / "plain.txt").exists(), "sibling of symlink must not be deleted" + assert link.is_symlink(), "symlink must remain" + assert protected.exists() and protected.read_text(encoding="utf-8") == "do-not-delete" + assert freed == 0, "nothing should have been freed" + + _, stderr = capsys.readouterr() + refusal_events = [ + line for line in stderr.splitlines() + if line.strip().startswith("{") and '"symlink_in_tree_refused"' in line + ] + assert refusal_events, f"expected symlink_in_tree_refused log, got: {stderr!r}" + payload = json.loads(refusal_events[0]) + assert payload["event"] == "symlink_in_tree_refused" + assert Path(payload["path"]).name == "nested" + assert Path(payload["symlink"]).name == "link_inside" + + +def test_tree_has_symlink_returns_offender(tmp_path): + """Unit test for the pre-walk helper: returns first symlink path, None otherwise.""" + clean = tmp_path / "clean" + clean.mkdir() + (clean / "a.txt").write_text("x", encoding="utf-8") + assert _tree_has_symlink(str(clean)) is None + + dirty = tmp_path / "dirty" + dirty.mkdir() + sub = dirty / "sub" + sub.mkdir() + link = sub / "ln" + try: + link.symlink_to(clean, target_is_directory=True) + except (OSError, NotImplementedError) as exc: + pytest.skip(f"symlink creation not permitted in this env: {exc}") + + offender = _tree_has_symlink(str(dirty)) + assert offender is not None + assert Path(offender).name == "ln" + + +# ---------- (f) structured success log ---------- + +def test_permanent_delete_emits_structured_success_log(tmp_path, capsys): + """Each successful PERMANENT delete emits a ``permanent_deleted`` line + with path + byte count so operators can audit what a run actually touched.""" + (tmp_path / "a.txt").write_bytes(b"1234567890") + freed = _delete_dir_contents(str(tmp_path), _silent_log, DeleteMode.PERMANENT) + assert freed == 10 + + _, stderr = capsys.readouterr() + events = [ + json.loads(line) for line in 
stderr.splitlines() + if line.strip().startswith("{") and '"permanent_deleted"' in line + ] + assert events, f"expected permanent_deleted log, got: {stderr!r}" + assert events[0]["event"] == "permanent_deleted" + assert Path(events[0]["path"]).name == "a.txt" + assert events[0]["bytes"] == 10 diff --git a/tests/test_tool_runner.py b/tests/test_tool_runner.py index 76bbcf3..ee31572 100644 --- a/tests/test_tool_runner.py +++ b/tests/test_tool_runner.py @@ -5,6 +5,7 @@ import os import subprocess import sys +import uuid from pathlib import Path import pytest @@ -169,6 +170,35 @@ def _run_runner(arg: str, cwd: Path) -> subprocess.CompletedProcess: ) +@pytest.fixture +def make_tool_file(): + """Create a .py file inside ``tools/`` for the test's duration. + + The runner's path-containment guard refuses paths outside ``tools/``, so + subprocess tests that exercise the file-load branch must place their + fixtures under ``tools/``. Files are underscore-prefixed so + ``Main.discover_tools`` skips any stragglers if cleanup fails. 
+ """ + created: list[Path] = [] + tools_dir = PROJECT_ROOT / "tools" + + def _factory(body: str, name: str | None = None) -> Path: + if name is None: + name = f"_test_{uuid.uuid4().hex[:8]}.py" + path = tools_dir / name + path.write_text(body, encoding="utf-8") + created.append(path) + return path + + yield _factory + + for p in created: + try: + p.unlink() + except FileNotFoundError: + pass + + def test_runner_exits_with_usage_when_no_args(): r = subprocess.run( [sys.executable, "-m", "tools._runner"], @@ -180,23 +210,17 @@ def test_runner_exits_with_usage_when_no_args(): assert "usage" in r.stderr.lower() -def test_runner_executes_run_tool_from_file_path(tmp_path): - tool_file = tmp_path / "test_tool.py" - tool_file.write_text( - "def run_tool():\n print('OK')\n", - encoding="utf-8", - ) +def test_runner_executes_run_tool_from_file_path(make_tool_file): + tool_file = make_tool_file("def run_tool():\n print('OK')\n") r = _run_runner(str(tool_file), cwd=PROJECT_ROOT) assert r.returncode == 0, r.stderr assert r.stdout.strip() == "OK" -def test_runner_reports_nonzero_on_tool_exception(tmp_path): - tool_file = tmp_path / "crashy.py" - tool_file.write_text( +def test_runner_reports_nonzero_on_tool_exception(make_tool_file): + tool_file = make_tool_file( "def run_tool():\n raise RuntimeError('boom')\n", - encoding="utf-8", ) r = _run_runner(str(tool_file), cwd=PROJECT_ROOT) @@ -205,26 +229,21 @@ def test_runner_reports_nonzero_on_tool_exception(tmp_path): assert "boom" in r.stderr -def test_runner_propagates_sys_exit_code(tmp_path): - tool_file = tmp_path / "exit_code.py" - tool_file.write_text( +def test_runner_propagates_sys_exit_code(make_tool_file): + tool_file = make_tool_file( "import sys\n" "def run_tool():\n sys.exit(7)\n", - encoding="utf-8", ) r = _run_runner(str(tool_file), cwd=PROJECT_ROOT) assert r.returncode == 7 -def test_runner_handles_spaces_in_filename(tmp_path): - """Runner must accept file paths containing spaces (e.g., NETWORK STABILITY 
MONITOR.py).""" - spaced_dir = tmp_path / "with spaces" - spaced_dir.mkdir() - tool_file = spaced_dir / "My Tool.py" - tool_file.write_text( +def test_runner_handles_spaces_in_filename(make_tool_file): + """Runner must accept file paths containing spaces (general contract).""" + tool_file = make_tool_file( "def run_tool():\n print('spaced OK')\n", - encoding="utf-8", + name="_test with spaces.py", ) r = _run_runner(str(tool_file), cwd=PROJECT_ROOT) @@ -232,13 +251,9 @@ def test_runner_handles_spaces_in_filename(tmp_path): assert r.stdout.strip() == "spaced OK" -def test_runner_runs_module_without_run_tool(tmp_path): +def test_runner_runs_module_without_run_tool(make_tool_file): """If a module has no run_tool(), the runner still exits 0 after import.""" - tool_file = tmp_path / "no_entry.py" - tool_file.write_text( - "print('top level ran')\n", - encoding="utf-8", - ) + tool_file = make_tool_file("print('top level ran')\n") r = _run_runner(str(tool_file), cwd=PROJECT_ROOT) assert r.returncode == 0, r.stderr @@ -252,15 +267,68 @@ def test_runner_accepts_dotted_module_name(): assert r.returncode == 0, r.stderr -def test_runner_returns_exit_code_3_on_import_failure(tmp_path): +def test_runner_returns_exit_code_3_on_import_failure(make_tool_file): """Runner must return exit code 3 when a tool fails to import (e.g., syntax error).""" - tool_file = tmp_path / "broken_import.py" - tool_file.write_text( - "raise ImportError('broken module')\n", - encoding="utf-8", - ) + tool_file = make_tool_file("raise ImportError('broken module')\n") r = _run_runner(str(tool_file), cwd=PROJECT_ROOT) assert r.returncode == 3 assert "ImportError" in r.stderr assert "broken module" in r.stderr + + +# ── Path-containment guard (A8) ────────────────────────────────────────────── + + +def test_runner_refuses_path_outside_tools_dir(tmp_path): + """Runner must refuse to load a .py file that lives outside tools/.""" + outside = tmp_path / "outside_tool.py" + outside.write_text( + "def 
run_tool():\n print('should not run')\n", + encoding="utf-8", + ) + + r = _run_runner(str(outside), cwd=PROJECT_ROOT) + assert r.returncode == 3 + assert "refusing to load" in r.stderr + assert "should not run" not in r.stdout + + +def test_runner_refuses_nonexistent_py_path(tmp_path): + """A .py path that does not exist must be rejected by the strict-resolve guard.""" + missing = tmp_path / "does_not_exist.py" + + r = _run_runner(str(missing), cwd=PROJECT_ROOT) + assert r.returncode == 3 + stderr_lower = r.stderr.lower() + assert ( + "filenotfounderror" in stderr_lower + or "no such file" in stderr_lower + or "cannot find the file" in stderr_lower + ) + + +def test_runner_refuses_symlink_escape_from_tools(tmp_path): + """A symlink inside tools/ pointing outside must be refused after resolve().""" + outside = tmp_path / "external_tool.py" + outside.write_text( + "def run_tool():\n print('escaped!')\n", + encoding="utf-8", + ) + + link = PROJECT_ROOT / "tools" / "_test_escape_link.py" + try: + os.symlink(str(outside), str(link)) + except (OSError, NotImplementedError): + pytest.skip("symlink creation not permitted on this platform") + + try: + r = _run_runner(str(link), cwd=PROJECT_ROOT) + assert r.returncode == 3 + assert "refusing to load" in r.stderr + assert "escaped!" 
not in r.stdout + finally: + try: + link.unlink() + except FileNotFoundError: + pass diff --git a/tools/NETWORK STABILITY MONITOR.py b/tools/NETWORK STABILITY MONITOR.py deleted file mode 100644 index 74247c9..0000000 --- a/tools/NETWORK STABILITY MONITOR.py +++ /dev/null @@ -1,3209 +0,0 @@ -""" -network_stability_monitor_pro.py - -NETWORK STABILITY MONITOR PRO - AI Enhanced (Windows 10) - -Advanced Features: -- AI-powered root cause analysis (Router/ISP/DNS/Adapter/Suspicious) -- Suspicious behavior detection and security monitoring -- Human-readable explanations with evidence -- AI-friendly export for ChatGPT analysis (under 50KB) -- Intelligent probability-based reasoning - -Dependencies: - pip install psutil customtkinter -""" - -import os -import re -import json -import time -import queue -import sqlite3 -import threading -import subprocess -import concurrent.futures -from dataclasses import dataclass, asdict -from datetime import datetime -from typing import Dict, List, Optional, Tuple, Any - -from tools._common.threadsafe import BoundedDeque - -import tkinter as tk -from tkinter import ttk, messagebox, filedialog - -# Handle optional dependencies gracefully -try: - import customtkinter as ctk - HAS_CTK = True -except ImportError: - HAS_CTK = False - ctk = None - -try: - import psutil - HAS_PSUTIL = True -except ImportError: - HAS_PSUTIL = False - psutil = None - -try: - from network_intelligence_engine import ( - NetworkIntelligenceEngine, - RootCauseProbability, - SuspiciousIndicators, - AnomalyFlags - ) - HAS_INTELLIGENCE = True -except ImportError: - HAS_INTELLIGENCE = False - NetworkIntelligenceEngine = None - RootCauseProbability = None - SuspiciousIndicators = None - AnomalyFlags = None - -TOOL_NAME = "Network Stability Monitor Pro" - -# ========================= -# Auto Export Configuration -# ========================= -AUTO_EXPORT_ENABLED = True -AUTO_EXPORT_TIME = "23:30" # HH:MM -EXPORT_FOLDER = "exports" - -# ========================= -# Helpers 
-# ========================= - -def now_ts() -> str: - return datetime.now().strftime("%Y-%m-%d %H:%M:%S") - -def safe_run(cmd: List[str], timeout: int = 10) -> Tuple[int, str, str]: - try: - cp = subprocess.run(cmd, capture_output=True, text=True, errors="replace", timeout=timeout, shell=False, - creationflags=subprocess.CREATE_NO_WINDOW) - return cp.returncode, cp.stdout, cp.stderr - except Exception as e: - return 1, "", str(e) - -def parse_ipv4(s: str) -> List[str]: - return re.findall(r"\b(\d{1,3}(?:\.\d{1,3}){3})\b", s) - -def short(s: str, n: int = 220) -> str: - s = s.replace("\r", "") - return s[:n] + ("..." if len(s) > n else "") - -def get_default_gateway() -> Optional[str]: - rc, out, _ = safe_run(["route", "print"], timeout=10) - if rc == 0: - for line in out.splitlines(): - line = line.strip() - if line.startswith("0.0.0.0"): - parts = re.split(r"\s+", line) - if len(parts) >= 4 and parts[0] == "0.0.0.0" and parts[1] == "0.0.0.0": - gw = parts[2] - if re.match(r"^\d+\.\d+\.\d+\.\d+$", gw): - return gw - return None - -def get_dns_servers() -> List[str]: - rc, out, _ = safe_run(["ipconfig", "/all"], timeout=12) - if rc != 0: - return [] - servers: List[str] = [] - collecting = False - for line in out.splitlines(): - if "DNS Servers" in line: - collecting = True - servers.extend(parse_ipv4(line)) - continue - if collecting: - if line and not line.startswith(" "): - collecting = False - else: - servers.extend(parse_ipv4(line)) - seen = set() - res = [] - for s in servers: - if s not in seen: - seen.add(s) - res.append(s) - return res - -def get_default_route_interface_ip() -> Tuple[str, str]: - """ - Returns (local_ip, interface_name_guess) - Uses: route print 0.0.0.0 -> Interface column (IPv4) - Then maps that interface IP to a psutil interface name - """ - rc, out, _ = safe_run(["route", "print", "0.0.0.0"], timeout=10) - if rc != 0: - return "", "" - - local_ip = "" - for line in out.splitlines(): - line = line.strip() - if line.startswith("0.0.0.0"): 
- parts = re.split(r"\s+", line) - if len(parts) >= 5 and parts[0] == "0.0.0.0" and parts[1] == "0.0.0.0": - iface_ip = parts[3] - if re.match(r"^\d+\.\d+\.\d+\.\d+$", iface_ip): - local_ip = iface_ip - break - - if not local_ip: - return "", "" - - if HAS_PSUTIL: - try: - addrs = psutil.net_if_addrs() - for ifname, addr_list in addrs.items(): - for a in addr_list: - if str(a.family) == "AddressFamily.AF_INET" or int(getattr(a.family, "value", 0) or 0) == 2: - if a.address == local_ip: - return local_ip, ifname - except Exception: - pass - - return local_ip, "" - -def ping_once(target: str, timeout_ms: int = 800) -> Tuple[bool, Optional[float], str]: - rc, out, err = safe_run(["ping", "-n", "1", "-w", str(timeout_ms), target], timeout=max(2, int(timeout_ms/200)+2)) - raw = out + ("\n" + err if err else "") - m = re.search(r"time[=<]\s*(\d+)\s*ms", raw, re.IGNORECASE) - rtt = float(m.group(1)) if m else None - success = ("TTL=" in raw) or ("Reply from" in raw and "Destination host unreachable" not in raw) - return success, rtt, raw - -def nslookup(domain: str, server: Optional[str], timeout_s: int = 4) -> Tuple[str, str]: - """ - Returns (dns_state, raw) - dns_state: OK | SLOW | FAIL - """ - cmd = ["nslookup", domain] - if server: - cmd.append(server) - rc, out, err = safe_run(cmd, timeout=timeout_s) - raw = out + ("\n" + err if err else "") - low = raw.lower() - - has_ip = len(parse_ipv4(raw)) > 0 - has_timeout = "timed out" in low - has_nxdomain = ("non-existent" in low) or ("can't find" in low) - - if has_nxdomain: - return "FAIL", raw - - if has_timeout and has_ip: - return "SLOW", raw - if has_timeout and not has_ip: - return "FAIL", raw - - return ("OK" if has_ip else "FAIL"), raw - -def netsh_wlan_info() -> Dict[str, str]: - rc, out, _ = safe_run(["netsh", "wlan", "show", "interfaces"], timeout=6) - if rc != 0: - return {} - def grab(key: str) -> str: - m = re.search(rf"^\s*{re.escape(key)}\s*:\s*(.+)\s*$", out, re.MULTILINE | re.IGNORECASE) - return 
m.group(1).strip() if m else "" - info = { - "state": grab("State"), - "ssid": grab("SSID"), - "bssid": grab("BSSID"), - "signal": grab("Signal"), - "radio": grab("Radio type"), - "channel": grab("Channel"), - "rx_rate": grab("Receive rate (Mbps)"), - "tx_rate": grab("Transmit rate (Mbps)"), - } - return {k: v for k, v in info.items() if v} - -def _trigger_wifi_scan(): - """Force Windows to perform an active Wi-Fi scan using the native WlanScan API.""" - try: - import ctypes - from ctypes import wintypes - - wlanapi = ctypes.windll.wlanapi - client_handle = wintypes.HANDLE() - negotiated = wintypes.DWORD() - ret = wlanapi.WlanOpenHandle(2, None, ctypes.byref(negotiated), ctypes.byref(client_handle)) - if ret != 0: - return - - class WLAN_INTERFACE_INFO(ctypes.Structure): - _fields_ = [ - ('InterfaceGuid', ctypes.c_byte * 16), - ('strInterfaceDescription', ctypes.c_wchar * 256), - ('isState', ctypes.c_uint), - ] - class WLAN_INTERFACE_INFO_LIST(ctypes.Structure): - _fields_ = [ - ('dwNumberOfItems', ctypes.c_uint), - ('dwIndex', ctypes.c_uint), - ('InterfaceInfo', WLAN_INTERFACE_INFO * 1), - ] - - info_list = ctypes.POINTER(WLAN_INTERFACE_INFO_LIST)() - ret2 = wlanapi.WlanEnumInterfaces(client_handle, None, ctypes.byref(info_list)) - if ret2 == 0 and info_list.contents.dwNumberOfItems > 0: - guid = info_list.contents.InterfaceInfo[0].InterfaceGuid - guid_bytes = (ctypes.c_byte * 16)(*guid) - wlanapi.WlanScan(client_handle, ctypes.byref(guid_bytes), None, None, None) - - wlanapi.WlanCloseHandle(client_handle, None) - except Exception: - pass - - -def scan_wifi_networks() -> List[Dict]: - """Trigger an active Wi-Fi scan, wait briefly, then parse results.""" - _trigger_wifi_scan() - time.sleep(1.5) # Give the scan time to complete - rc, out, _ = safe_run(["netsh", "wlan", "show", "networks", "mode=bssid"], timeout=10) - if rc != 0: - return [] - networks = [] - current: Dict = {} - - def _commit(): - if current.get("ssid") and current.get("channel"): - 
networks.append(dict(current)) - - for line in out.splitlines(): - line = line.strip() - if line.startswith("SSID") and "BSSID" not in line and ":" in line: - _commit() - current = {"ssid": line.split(":", 1)[1].strip()} - elif "BSSID" in line and ":" in line: - # If we already have a BSSID, this is a second AP for same SSID — commit previous - if current.get("bssid") and current.get("channel"): - _commit() - ssid = current.get("ssid", "") - current = {"ssid": ssid} - current["bssid"] = line.split(":", 1)[1].strip() - elif line.startswith("Signal") and ":" in line: - sig = line.split(":")[1].strip().rstrip("%").strip() - try: - current["signal_pct"] = int(sig) - except ValueError: - current["signal_pct"] = 0 - elif line.startswith("Channel") and ":" in line: - try: - current["channel"] = int(line.split(":")[1].strip()) - except ValueError: - current["channel"] = 0 - elif line.startswith("Radio type") and ":" in line: - current["radio"] = line.split(":")[1].strip() - elif line.startswith("Authentication") and ":" in line: - current["auth"] = line.split(":")[1].strip() - elif line.startswith("Encryption") and ":" in line: - current["encryption"] = line.split(":")[1].strip() - - _commit() # Don't forget the last network - return networks - - -def recommend_channel(networks: List[Dict], my_ssid: str = "") -> Dict: - """Score channels 1, 6, 11 and recommend the best one. 
- Returns {best: int, scores: {1: X, 6: Y, 11: Z}, current: int, reason: str} - """ - candidates = [1, 6, 11] - scores = {} - current_ch = 0 - - for candidate in candidates: - score = 100.0 - for net in networks: - ch = net.get("channel", 0) - sig = net.get("signal_pct", 0) - ssid = net.get("ssid", "") - - # Skip our own network - if ssid == my_ssid: - if ch: - current_ch = ch - continue - - # Distance in channels - dist = abs(ch - candidate) - if dist == 0: - # Same channel: full penalty based on signal - score -= sig * 0.8 - elif dist < 5: - # Overlapping channel: partial penalty - overlap_factor = (5 - dist) / 5.0 # 1.0 at dist=0, 0.2 at dist=4 - score -= sig * 0.5 * overlap_factor - - scores[candidate] = max(0, round(score, 1)) - - best = max(candidates, key=lambda c: scores[c]) - reason = "" - if current_ch == best: - reason = "You're already on the best channel!" - elif current_ch: - diff = scores[best] - scores.get(current_ch, 0) - reason = f"Channel {best} has {diff:.0f} points less interference than your current channel {current_ch}" - else: - reason = f"Channel {best} has the least interference" - - return {"best": best, "scores": scores, "current": current_ch, "reason": reason} - - -def parse_ts(ts: str) -> Optional[datetime]: - try: - return datetime.strptime(ts, "%Y-%m-%d %H:%M:%S") - except Exception: - return None - -def duration_str(start_ts: str, end_ts: str) -> str: - a = parse_ts(start_ts) - b = parse_ts(end_ts) - if not a or not b: - return "" - sec = int(max(0, (b - a).total_seconds())) - if sec < 60: - return f"{sec}s" - m, s = divmod(sec, 60) - if m < 60: - return f"{m}m {s}s" - h, m = divmod(m, 60) - return f"{h}h {m}m" - - -# ========================= -# Data -# ========================= - -@dataclass -class Sample: - timestamp: str - local_ip: str - iface: str - gateway_ip: str - dns_servers: List[str] - wifi_state: str - wifi_signal: str - wifi_ssid: str - - gw_ok: bool - gw_rtt: Optional[float] - inet_ok: bool - inet_rtt: Optional[float] 
- inet2_ok: bool - inet2_rtt: Optional[float] - - dns_state: str # OK | SLOW | FAIL - dns_raw_hint: str - - status: str # OK | DEGRADED | DOWN - reason: str # summary - - # Wi-Fi details - wifi_bssid: str = "" - wifi_channel: str = "" - wifi_radio: str = "" # e.g. "802.11ac", "802.11ax" - wifi_signal_pct: int = -1 # parsed integer signal % - - # Enhanced intelligence fields - root_cause: Optional[RootCauseProbability] = None - explanation: str = "" - suspicion_level: str = "NONE" - anomaly_flags: Optional[AnomalyFlags] = None - -@dataclass -class Event: - timestamp: str - severity: str # INFO|WARN|HIGH - category: str # LINK|GATEWAY|ISP|DNS|DEGRADED|CONFIG - title: str - details: Dict - -@dataclass -class Incident: - """ - Combined 'problem + recovery' record. - """ - id: int - start_time: str - end_time: str # empty until recovered - duration: str # computed at end - severity: str # highest severity seen during incident - category: str # primary category at start - start_status: str # DEGRADED/DOWN - end_status: str # OK - cause: str # reason - details: Dict - cause_timeline: List[Dict] = None # New field for tracking cause changes - -# ========================= - -class NetworkStabilityEngine: - def __init__(self, state_path: str): - self.state_path = state_path - self.samples: BoundedDeque = BoundedDeque(maxlen=3000) - self.events: BoundedDeque = BoundedDeque(maxlen=2000) - self.incidents: BoundedDeque = BoundedDeque(maxlen=1000) - - self.baseline_gateway: str = "" - self.baseline_dns: List[str] = [] - - self.last_status: str = "" - self.last_reason: str = "" - - self.last_gateway_seen: str = "" - self.last_dns_seen: List[str] = [] - - self._baseline_gw_warn_fired: bool = False - self._baseline_dns_warn_fired: bool = False - self._last_diagnostic_event: float = 0.0 - self._start_time: float = time.time() - - # Latency thresholds (can be updated from the App) - self.thresh_elevated: int = 200 - self.thresh_high: int = 400 - - self.flap_window: List[Tuple[float, 
str]] = [] - self._last_flap_log_at: float = 0.0 - - self.roll: Dict[str, List[Tuple[float, bool, Optional[float]]]] = { - "gw": [], - "inet1": [], - "inet2": [], - } - - # Wi-Fi signal tracking - self.signal_history: List[Tuple[float, int]] = [] # (timestamp, signal_pct) - self.baseline_bssid: str = "" - self.last_bssid_seen: str = "" - self._bssid_change_warned: bool = False - - # incident tracking - self._next_incident_id = 1 - self.active_incidents: Dict[Tuple[str, str], int] = {} # (category, reason) -> incident_id - self.last_export_timestamp: Optional[datetime] = None - self.last_export_minute_key: Optional[str] = None - - # Parse export time from AUTO_EXPORT_TIME - self.export_hour = 23 - self.export_minute = 30 - try: - h, m = AUTO_EXPORT_TIME.split(":") - self.export_hour = int(h) - self.export_minute = int(m) - except Exception: - pass - - # Enhanced intelligence engine (if available) - if HAS_INTELLIGENCE: - self.intelligence = NetworkIntelligenceEngine() - else: - self.intelligence = None - - self._init_db() - self._load_state() - - def _load_state(self): - if not os.path.exists(self.state_path): - return - try: - with open(self.state_path, "r", encoding="utf-8") as f: - data = json.load(f) - self.baseline_gateway = data.get("baseline_gateway", "") or "" - self.baseline_dns = data.get("baseline_dns", []) or [] - except Exception: - pass - - def save_state(self): - try: - data = { - "baseline_gateway": self.baseline_gateway, - "baseline_dns": self.baseline_dns, - "saved_at": now_ts(), - } - with open(self.state_path, "w", encoding="utf-8") as f: - json.dump(data, f, indent=2, ensure_ascii=False) - except Exception: - pass - - # ---------- SQLite incident database ---------- - - def _init_db(self): - """Initialise SQLite database for persistent incident storage.""" - db_dir = os.path.dirname(self.state_path) or "." 
- self._db_path = os.path.join(db_dir, "network_incidents.db") - con = sqlite3.connect(self._db_path) - con.execute(""" - CREATE TABLE IF NOT EXISTS incidents ( - id INTEGER PRIMARY KEY, - start_time TEXT NOT NULL, - end_time TEXT, - duration TEXT, - severity TEXT NOT NULL, - category TEXT NOT NULL, - start_status TEXT, - end_status TEXT, - cause TEXT, - details TEXT, - cause_timeline TEXT - ) - """) - con.commit() - con.close() - - def _db_conn(self) -> sqlite3.Connection: - return sqlite3.connect(self._db_path) - - def _db_insert_incident(self, inc: 'Incident'): - """INSERT a newly created incident into the database.""" - try: - con = self._db_conn() - con.execute( - "INSERT INTO incidents (id, start_time, end_time, duration, severity, " - "category, start_status, end_status, cause, details, cause_timeline) " - "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", - ( - inc.id, inc.start_time, inc.end_time, inc.duration, - inc.severity, inc.category, inc.start_status, inc.end_status, - inc.cause, - json.dumps(inc.details, ensure_ascii=False), - json.dumps(inc.cause_timeline, ensure_ascii=False) if inc.cause_timeline else "[]", - ), - ) - con.commit() - con.close() - except Exception: - pass - - def _db_update_incident(self, inc: 'Incident'): - """UPDATE an existing incident (severity escalation or closure).""" - try: - con = self._db_conn() - con.execute( - "UPDATE incidents SET end_time=?, duration=?, severity=?, cause=?, " - "details=?, cause_timeline=? 
WHERE id=?", - ( - inc.end_time, inc.duration, inc.severity, inc.cause, - json.dumps(inc.details, ensure_ascii=False), - json.dumps(inc.cause_timeline, ensure_ascii=False) if inc.cause_timeline else "[]", - inc.id, - ), - ) - con.commit() - con.close() - except Exception: - pass - - def _db_load_incidents(self, limit: int = 500, - severity_filter: str = "ALL", - category_filter: str = "ALL") -> List[Dict]: - """Load incidents from DB with optional filters.""" - try: - con = self._db_conn() - con.row_factory = sqlite3.Row - query = "SELECT * FROM incidents WHERE 1=1" - params: list = [] - if severity_filter != "ALL": - query += " AND severity = ?" - params.append(severity_filter) - if category_filter != "ALL": - query += " AND category = ?" - params.append(category_filter) - query += " ORDER BY id DESC LIMIT ?" - params.append(limit) - rows = con.execute(query, params).fetchall() - con.close() - return [dict(r) for r in rows] - except Exception: - return [] - - def log_event(self, severity: str, category: str, title: str, details: Dict): - # BoundedDeque enforces maxlen=2000 automatically. - self.events.append(Event(now_ts(), severity, category, title, details)) - - def add_sample(self, s: Sample): - # BoundedDeque enforces maxlen=3000 automatically. 
- self.samples.append(s) - - def set_baseline(self, gateway: str, dns_servers: List[str], local_ip: str = ""): - self.baseline_gateway = gateway - self.baseline_dns = dns_servers[:] - if self.intelligence: - self.intelligence.update_baseline(gateway, dns_servers, local_ip) - self.save_state() - self.log_event("INFO", "CONFIG", "Baseline updated", {"gateway": gateway, "dns_servers": dns_servers}) - - def _roll_add(self, key: str, ok: bool, rtt: Optional[float]): - t = time.time() - self.roll[key].append((t, ok, rtt)) - self.roll[key] = [(ts, o, r) for (ts, o, r) in self.roll[key] if t - ts <= 60] - - def _roll_loss(self, key: str) -> float: - w = self.roll[key] - if not w: - return 0.0 - fails = sum(1 for _, ok, _ in w if not ok) - return fails / max(1, len(w)) - - def _roll_max_rtt(self, key: str) -> Optional[float]: - rtts = [r for _, ok, r in self.roll[key] if ok and r is not None] - return max(rtts) if rtts else None - - def classify(self, - local_ip: str, - wifi: Dict[str, str], - gw: str, - gw_ok: bool, gw_rtt: Optional[float], - inet_ok: bool, inet_rtt: Optional[float], - inet2_ok: bool, inet2_rtt: Optional[float], - dns_state: str) -> Tuple[str, str, str, str]: - wifi_state = wifi.get("state", "").lower() - wifi_connected = ("connected" in wifi_state) if wifi_state else True - - # Signal correlation hint — appended to reason when signal is weak - sig_hint = "" - sig_corr = self.signal_correlated_with_issue() - if sig_corr: - sig_hint = f" [Wi-Fi: {sig_corr}]" - - if not wifi_connected: - return "DOWN", "Wi-Fi disconnected (link down)", "HIGH", "LINK" - - if not local_ip: - # Only report as DOWN if pings also fail — avoids false positives - # during DHCP renewal or when IPv6 is the primary route - any_ping = gw_ok or inet_ok or inet2_ok - if not any_ping: - return "DOWN", "No local IPv4 on default route (adapter/DHCP issue)", "HIGH", "LINK" - # else: route table glitch but connectivity works — log as INFO, not DOWN - - if gw and (not gw_ok) and (not inet_ok) 
and (not inet2_ok): - return "DOWN", f"Gateway unreachable and internet down (router/Wi-Fi issue){sig_hint}", "HIGH", "GATEWAY" - - if (not inet_ok) and (not inet2_ok) and (gw_ok or not gw): - return "DOWN", f"Internet unreachable (ISP/WAN outage) while local network seems up{sig_hint}", "HIGH", "ISP" - - if (inet_ok or inet2_ok) and dns_state == "FAIL": - return "DEGRADED", f"DNS failing while internet reachable (DNS issue){sig_hint}", "WARN", "DNS" - if (inet_ok or inet2_ok) and dns_state == "SLOW": - return "DEGRADED", f"DNS slow/timeouts (intermittent DNS issue){sig_hint}", "INFO", "DNS" - - loss1 = self._roll_loss("inet1") - loss2 = self._roll_loss("inet2") - if max(loss1, loss2) >= 0.25: - return "DEGRADED", f"Packet loss detected (internet) ~{max(loss1, loss2)*100:.0f}% (last 60s){sig_hint}", "WARN", "DEGRADED" - - rtts = [r for r in [gw_rtt, inet_rtt, inet2_rtt] if r is not None] - mx = max(rtts) if rtts else None - if mx is not None: - if mx >= self.thresh_high: - return "DEGRADED", f"High latency detected (max {mx:.0f} ms){sig_hint}", "WARN", "DEGRADED" - if mx >= self.thresh_elevated: - return "DEGRADED", f"Latency elevated (max {mx:.0f} ms){sig_hint}", "INFO", "DEGRADED" - - if gw and (not gw_ok) and (inet_ok or inet2_ok): - return "DEGRADED", f"Gateway ping failing but internet OK (router ICMP blocked/rate-limited){sig_hint}", "INFO", "GATEWAY" - - return "OK", "Stable", "INFO", "DEGRADED" - - def detect_config_changes(self, gateway_ip: str, dns_servers: List[str]): - if gateway_ip and gateway_ip != self.last_gateway_seen: - if self.last_gateway_seen: - self.log_event("WARN", "CONFIG", "Gateway IP changed", {"old": self.last_gateway_seen, "new": gateway_ip}) - self.last_gateway_seen = gateway_ip - - if dns_servers != self.last_dns_seen: - if self.last_dns_seen: - self.log_event("WARN", "CONFIG", "DNS servers changed", {"old": self.last_dns_seen, "new": dns_servers}) - self.last_dns_seen = dns_servers[:] - - if self.baseline_gateway and gateway_ip and 
gateway_ip != self.baseline_gateway: - if not self._baseline_gw_warn_fired: - self._baseline_gw_warn_fired = True - self.log_event("WARN", "CONFIG", "Current gateway differs from baseline", {"baseline": self.baseline_gateway, "current": gateway_ip}) - elif self.baseline_gateway and gateway_ip and gateway_ip == self.baseline_gateway: - self._baseline_gw_warn_fired = False - - if self.baseline_dns and dns_servers and dns_servers != self.baseline_dns: - if not self._baseline_dns_warn_fired: - self._baseline_dns_warn_fired = True - self.log_event("WARN", "CONFIG", "Current DNS differs from baseline", {"baseline": self.baseline_dns, "current": dns_servers}) - elif self.baseline_dns and dns_servers and dns_servers == self.baseline_dns: - self._baseline_dns_warn_fired = False - - def detect_flapping(self, status: str): - t = time.time() - self.flap_window.append((t, status)) - self.flap_window = [(ts, st) for (ts, st) in self.flap_window if t - ts <= 300] - - transitions = 0 - prev = None - for _, st in self.flap_window: - if prev is None: - prev = st - continue - if st != prev: - transitions += 1 - prev = st - - if transitions >= 6: - if (t - self._last_flap_log_at) >= 60: - self._last_flap_log_at = t - # NOTE: flapping is an event, not an incident - self.log_event("WARN", "DEGRADED", "Frequent network state changes (flapping)", {"transitions_last_5min": transitions}) - - # ---------- Wi-Fi signal & BSSID tracking ---------- - - def track_signal(self, signal_pct: int): - """Track Wi-Fi signal strength over time.""" - if signal_pct < 0: - return - t = time.time() - self.signal_history.append((t, signal_pct)) - # Keep last 5 minutes - self.signal_history = [(ts, s) for ts, s in self.signal_history if t - ts <= 300] - - def get_signal_avg(self, window_sec: int = 60) -> Optional[float]: - """Average signal % over last N seconds.""" - t = time.time() - vals = [s for ts, s in self.signal_history if t - ts <= window_sec] - return sum(vals) / len(vals) if vals else None - - def 
get_signal_min(self, window_sec: int = 60) -> Optional[int]: - """Min signal % over last N seconds.""" - t = time.time() - vals = [s for ts, s in self.signal_history if t - ts <= window_sec] - return min(vals) if vals else None - - def signal_correlated_with_issue(self) -> Optional[str]: - """Check if current issues correlate with weak Wi-Fi signal.""" - avg = self.get_signal_avg(60) - mn = self.get_signal_min(60) - if avg is None: - return None - if avg < 30: - return f"Very weak Wi-Fi signal ({avg:.0f}% avg) — likely cause of instability" - if avg < 50 and mn is not None and mn < 25: - return f"Weak Wi-Fi signal ({avg:.0f}% avg, dipped to {mn}%) — probable cause" - if avg < 60: - return f"Below-average Wi-Fi signal ({avg:.0f}% avg) — may contribute to issues" - return None - - def detect_bssid_change(self, bssid: str, ssid: str): - """Detect BSSID changes — potential evil twin AP attack.""" - if not bssid: - return - bssid_upper = bssid.upper().strip() - - # Set baseline on first observation - if not self.baseline_bssid: - self.baseline_bssid = bssid_upper - self.last_bssid_seen = bssid_upper - self.log_event("INFO", "CONFIG", "Wi-Fi BSSID baseline set", - {"bssid": bssid_upper, "ssid": ssid}) - return - - if bssid_upper != self.last_bssid_seen: - old_bssid = self.last_bssid_seen - self.last_bssid_seen = bssid_upper - - if bssid_upper != self.baseline_bssid: - # BSSID changed from baseline — possible evil twin or roaming - self.log_event("HIGH", "SECURITY", - "BSSID changed! 
Possible evil twin AP or Wi-Fi roaming", - {"baseline_bssid": self.baseline_bssid, - "previous_bssid": old_bssid, - "current_bssid": bssid_upper, - "ssid": ssid, - "warning": "If you have only ONE router, this is suspicious — " - "someone may have set up a fake access point with " - "the same SSID to intercept your traffic."}) - else: - # Returned to baseline - if old_bssid != self.baseline_bssid: - self.log_event("INFO", "CONFIG", "BSSID returned to baseline", - {"bssid": bssid_upper, "ssid": ssid}) - - # ---------- Incident logic ---------- - def _sev_rank(self, s: str) -> int: - return {"INFO": 1, "WARN": 2, "HIGH": 3}.get(s, 0) - - def _sev_max(self, a: str, b: str) -> str: - return a if self._sev_rank(a) >= self._sev_rank(b) else b - - def on_state_update(self, status: str, category: str, severity: str, reason: str, details: Dict): - """ - Called every sample cycle after classify(). - Creates/updates incidents based on category and reason combinations. - """ - # Normalize variable reasons so fluctuating values don't create separate incidents - # Strip Wi-Fi signal hint first - normalized_reason = reason.split(" [Wi-Fi:")[0] if "[Wi-Fi:" in reason else reason - - # Group all latency issues into ONE incident type (elevated + high share the same root cause) - if "High latency" in normalized_reason or "Latency elevated" in normalized_reason: - normalized_reason = "Latency issue" - elif "Packet loss detected" in normalized_reason: - normalized_reason = "Packet loss detected" - elif "Gateway unreachable" in normalized_reason: - normalized_reason = "Gateway unreachable" - elif "Internet unreachable" in normalized_reason: - normalized_reason = "Internet unreachable" - elif "DNS slow" in normalized_reason or "DNS failing" in normalized_reason: - normalized_reason = "DNS issue" - elif "No local IPv4" in normalized_reason: - normalized_reason = "No network interface" - - key = (category, normalized_reason) - - # Start incident when leaving OK — only for WARN and HIGH 
severity - if status in ("DEGRADED", "DOWN"): - if self._sev_rank(severity) < 2: # Skip INFO-level - return - - # Create new incident if this (category, reason) combination doesn't exist - if key not in self.active_incidents: - inc = Incident( - id=self._next_incident_id, - start_time=now_ts(), - end_time="", - duration="", - severity=severity, - category=category, - start_status=status, - end_status="OK", - cause=reason, - details={ - "start_reason": reason, - "start_category": category, - "start_severity": severity, - **(details or {}), - }, - cause_timeline=[{ - "timestamp": now_ts(), - "category": category, - "reason": reason - }] - ) - self.active_incidents[key] = inc.id - self._next_incident_id += 1 - self.incidents.append(inc) - self._db_insert_incident(inc) - else: - # Update existing incident with same category/reason - incident_id = self.active_incidents[key] - inc = self._find_incident(incident_id) - if inc: - inc.severity = self._sev_max(inc.severity, severity) - inc.details["latest_reason"] = reason - inc.details["latest_category"] = category - inc.details["latest_severity"] = severity - inc.cause = reason - - # Add to cause timeline if reason changed - if inc.cause_timeline: - last_entry = inc.cause_timeline[-1] - if last_entry["reason"] != reason: - inc.cause_timeline.append({ - "timestamp": now_ts(), - "category": category, - "reason": reason - }) - self._db_update_incident(inc) - - # End incidents on recovery to OK - close ALL active incidents - if status == "OK": - incidents_to_close = list(self.active_incidents.items()) - - for (cat, rsn), incident_id in incidents_to_close: - inc = self._find_incident(incident_id) - if inc and not inc.end_time: - inc.end_time = now_ts() - inc.duration = duration_str(inc.start_time, inc.end_time) - inc.details["end_reason"] = reason - self._db_update_incident(inc) - del self.active_incidents[(cat, rsn)] - - def _find_incident(self, incident_id: int) -> Optional[Incident]: - for inc in 
reversed(self.incidents.snapshot()): - if inc.id == incident_id: - return inc - return None - - # ---------- Auto Export Methods ---------- - def auto_export_check(self, enabled_var, hour_var, folder_var): - """Check if auto export should run and execute if needed""" - if not enabled_var.get(): # Use UI variable - return - - current_time = datetime.now() - - # Check if it's export hour AND minute - minute_key = current_time.strftime("%Y%m%d_%H%M") - - if ( - current_time.hour == self.export_hour and - current_time.minute == self.export_minute and - self.last_export_minute_key != minute_key - ): - self.last_export_minute_key = minute_key - self.run_auto_export(folder_var) - - def run_auto_export(self, folder_var): - """Execute automatic export""" - try: - # Get export folder from UI variable - export_folder = folder_var.get() - - # Create export folder if it doesn't exist - os.makedirs(export_folder, exist_ok=True) - - # Generate export filename - current_time = datetime.now() - filename = f"network_export_{current_time.strftime('%Y%m%d_%H%M')}.json" - filepath = os.path.join(export_folder, filename) - - # Prepare export data - export_data = self._prepare_export_data(current_time) - - # Write export file - with open(filepath, 'w', encoding='utf-8') as f: - json.dump(export_data, f, indent=2, ensure_ascii=False) - - # Update last export timestamp - self.last_export_timestamp = current_time - - # Log export event - self.log_event("INFO", "EXPORT", "Auto export completed", { - "filename": filename, - "incidents_count": len(export_data["incidents"]), - "events_count": len(export_data["events"]) - }) - - except Exception as e: - self.log_event("HIGH", "EXPORT", "Auto export failed", {"error": str(e)}) - - def _prepare_export_data(self, current_time: datetime) -> Dict[str, Any]: - """Prepare data for export""" - # Filter incidents since last export - incidents_snap = self.incidents.snapshot() - incidents_to_export = [] - if self.last_export_timestamp: - # Only include 
incidents after last export - for inc in incidents_snap: - inc_start = parse_ts(inc.start_time) - if inc_start and inc_start >= self.last_export_timestamp: - incidents_to_export.append(asdict(inc)) - else: - # Include all incidents if no previous export - incidents_to_export = [asdict(inc) for inc in incidents_snap] - - # Filter events since last export — snapshot once, iterate the copy. - events_snap = self.events.snapshot() - events_to_export = [] - if self.last_export_timestamp: - for event in events_snap: - event_time = parse_ts(event.timestamp) - if event_time and event_time >= self.last_export_timestamp: - events_to_export.append(asdict(event)) - else: - events_to_export = [asdict(event) for event in events_snap] - - # Calculate statistics - stats = self._calculate_export_statistics(incidents_to_export) - - # Determine period start/end - period_start = self.last_export_timestamp.strftime("%Y-%m-%d %H:%M:%S") if self.last_export_timestamp else incidents_to_export[0]["start_time"] if incidents_to_export else current_time.strftime("%Y-%m-%d %H:%M:%S") - period_end = current_time.strftime("%Y-%m-%d %H:%M:%S") - - return { - "export_time": current_time.strftime("%Y-%m-%d %H:%M:%S"), - "period_start": period_start, - "period_end": period_end, - "incidents": incidents_to_export, - "events": events_to_export, - "stats": stats - } - - def _calculate_export_statistics(self, incidents: List[Dict]) -> Dict[str, Any]: - """Calculate statistics for export""" - if not incidents: - return { - "total_incidents": 0, - "incident_by_category": {}, - "average_duration_seconds": 0.0, - "incidents_per_hour": {f"{i:02d}": 0 for i in range(24)} - } - - # Count incidents by category - incident_by_category = {} - for inc in incidents: - cat = inc["category"] - incident_by_category[cat] = incident_by_category.get(cat, 0) + 1 - - # Calculate average duration - durations = [] - for inc in incidents: - if inc["end_time"] and inc["start_time"]: - start = parse_ts(inc["start_time"]) - end = 
parse_ts(inc["end_time"]) - if start and end: - durations.append((end - start).total_seconds()) - - avg_duration = sum(durations) / len(durations) if durations else 0.0 - - # Count incidents per hour - incidents_per_hour = {f"{i:02d}": 0 for i in range(24)} - for inc in incidents: - start_time = parse_ts(inc["start_time"]) - if start_time: - hour_key = f"{start_time.hour:02d}" - incidents_per_hour[hour_key] += 1 - - return { - "total_incidents": len(incidents), - "incident_by_category": incident_by_category, - "average_duration_seconds": avg_duration, - "incidents_per_hour": incidents_per_hour - } - - # ---------- Enhanced Intelligence Methods ---------- - def get_rolling_stats(self) -> Dict[str, Any]: - """Get comprehensive rolling statistics for intelligence analysis""" - return { - 'gw_loss_percent': self._roll_loss('gw') * 100, - 'inet1_loss_percent': self._roll_loss('inet1') * 100, - 'inet2_loss_percent': self._roll_loss('inet2') * 100, - 'gw_max_latency': self._roll_max_rtt('gw') or 0, - 'inet1_max_latency': self._roll_max_rtt('inet1') or 0, - 'inet2_max_latency': self._roll_max_rtt('inet2') or 0, - 'max_latency_ms': max([self._roll_max_rtt('gw') or 0, - self._roll_max_rtt('inet1') or 0, - self._roll_max_rtt('inet2') or 0]), - 'packet_loss_percent': max(self._roll_loss('inet1'), self._roll_loss('inet2')) * 100, - 'dns_fail_rate': self._get_dns_fail_rate(), - 'state_transitions': len([i for i in range(len(self.flap_window)-1) - if self.flap_window[i][1] != self.flap_window[i+1][1]]) - } - - def _get_dns_fail_rate(self) -> float: - """Calculate DNS failure rate from recent samples""" - recent_samples = [s for s in self.samples.snapshot()[-30:] if s.dns_state in ['OK', 'FAIL', 'SLOW']] - if not recent_samples: - return 0.0 - fails = sum(1 for s in recent_samples if s.dns_state == 'FAIL') - return (fails / len(recent_samples)) * 100 - - def enhance_sample_with_intelligence(self, sample: Sample) -> Sample: - """Enhance a sample with intelligence analysis""" - # 
Convert sample to dict for analysis - sample_dict = { - 'local_ip': sample.local_ip, - 'gateway_ip': sample.gateway_ip, - 'dns_servers': sample.dns_servers, - 'wifi_state': sample.wifi_state, - 'gw_ok': sample.gw_ok, - 'gw_rtt': sample.gw_rtt, - 'inet_ok': sample.inet_ok, - 'inet_rtt': sample.inet_rtt, - 'inet2_ok': sample.inet2_ok, - 'inet2_rtt': sample.inet2_rtt, - 'dns_state': sample.dns_state, - 'status': sample.status, - 'wifi_signal': sample.wifi_signal, - 'iface': sample.iface - } - - rolling_stats = self.get_rolling_stats() - - # Track configuration and state changes - current_time = time.time() - self.intelligence.track_configuration( - sample.gateway_ip, sample.dns_servers, sample.local_ip, current_time - ) - self.intelligence.track_state_changes(sample.status, current_time) - - # Detect anomalies - anomalies = self.intelligence.detect_anomalies(sample_dict, rolling_stats) - - # Calculate root cause probabilities - root_cause = self.intelligence.calculate_root_cause_probability( - sample_dict, rolling_stats, anomalies - ) - - # Generate explanation - explanation = self.intelligence.generate_explanation( - sample_dict, rolling_stats, anomalies, root_cause - ) - - # Get suspicion level - suspicion_level = self.intelligence.detect_suspicious_indicators(current_time).suspicion_level() - - # Update sample with intelligence data - sample.root_cause = root_cause - sample.explanation = explanation - sample.suspicion_level = suspicion_level - sample.anomaly_flags = anomalies - - return sample - - def generate_ai_export(self) -> Dict[str, Any]: - """Generate AI-friendly export data""" - samples_snap = self.samples.snapshot() - if not samples_snap: - return {} - - current_sample = samples_snap[-1] - rolling_stats = self.get_rolling_stats() - incidents_data = [asdict(inc) for inc in self.incidents.snapshot()] - - # Handle missing intelligence engine gracefully - if self.intelligence: - try: - return self.intelligence.generate_ai_export( - asdict(current_sample), - 
rolling_stats, - incidents_data, - current_sample.anomaly_flags or AnomalyFlags(), - current_sample.root_cause or RootCauseProbability() - ) - except Exception as e: - print(f"Intelligence export failed: {e}") - # Fall back to basic export - - # Basic AI-friendly export without intelligence engine - return { - "summary": { - "timestamp": current_sample.timestamp, - "status": current_sample.status, - "reason": current_sample.reason, - "samples_collected": len(self.samples), - "incidents_count": len(self.incidents), - "events_count": len(self.events) - }, - "current_metrics": { - "local_ip": current_sample.local_ip, - "gateway_ip": current_sample.gateway_ip, - "dns_servers": current_sample.dns_servers, - "wifi_state": current_sample.wifi_state, - "wifi_signal": current_sample.wifi_signal, - "wifi_ssid": current_sample.wifi_ssid, - "gateway_ping": {"ok": current_sample.gw_ok, "rtt_ms": current_sample.gw_rtt}, - "internet_ping_1": {"ok": current_sample.inet_ok, "rtt_ms": current_sample.inet_rtt}, - "internet_ping_2": {"ok": current_sample.inet2_ok, "rtt_ms": current_sample.inet2_rtt}, - "dns_state": current_sample.dns_state - }, - "rolling_stats": rolling_stats, - "recent_incidents": incidents_data[-5:], # Last 5 incidents - "baseline": { - "gateway": self.baseline_gateway, - "dns_servers": self.baseline_dns - } - } -# Base class for App depending on available GUI framework -if HAS_CTK: - AppBase = ctk.CTkFrame -else: - AppBase = tk.Frame - -class App(AppBase): - def __init__(self, parent): - super().__init__(parent) - self.parent = parent - parent.title("Network Stability Monitor Pro") - parent.geometry("1280x780") - parent.minsize(800, 500) - - # Remove aggressive focus management to prevent ghost trails - # Just set window normally without topmost tricks - - self.state_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "net_stability_state.json") - self.engine = NetworkStabilityEngine(self.state_path) - - self.running = True - self.work_q: 
"queue.Queue[str]" = queue.Queue() - - self.interval_ms = tk.IntVar(value=3000) - self.ping_timeout_ms = tk.IntVar(value=1500) - - # Auto export configuration - self.auto_export_enabled = tk.BooleanVar(value=AUTO_EXPORT_ENABLED) - _h, _m = 23, 30 - try: - _h, _m = int(AUTO_EXPORT_TIME.split(":")[0]), int(AUTO_EXPORT_TIME.split(":")[1]) - except Exception: - pass - self.auto_export_hour = tk.IntVar(value=_h) - self.auto_export_minute = tk.IntVar(value=_m) - self.export_folder = tk.StringVar(value=EXPORT_FOLDER) - - # Latency threshold variables - self.thresh_elevated = tk.IntVar(value=200) - self.thresh_high = tk.IntVar(value=400) - - gw = get_default_gateway() or "" - ip, ifname = get_default_route_interface_ip() - dns = get_dns_servers() - - self.gateway = tk.StringVar(value=gw) - self.localip = tk.StringVar(value=ip) - self.iface = tk.StringVar(value=ifname) - self.dns_text = tk.StringVar(value=", ".join(dns) if dns else "") - - self.target1 = tk.StringVar(value="8.8.8.8") - self.target2 = tk.StringVar(value="1.1.1.1") - self.dns_domain = tk.StringVar(value="google.com") - - self._last_sample: Optional[Sample] = None - self._last_diag: Dict[str, str] = {} - self._refresh_counter: int = 0 - - # log filter state - self.filter_category = tk.StringVar(value="ALL") - self.filter_severity = tk.StringVar(value="ALL") - - # Set initial baseline if we have network info - if gw or dns: - self.engine.set_baseline(gw, dns, ip) - - # Log startup event - self.engine.log_event( - "INFO", - "CONFIG", - "Network Monitor Started", - { - "gateway": gw, - "local_ip": ip, - "interface": ifname, - "dns_servers": dns, - "has_intelligence": self.engine.intelligence is not None, - "monitoring_interval_ms": 2000 - } - ) - - self._build_ui() - - threading.Thread(target=self._worker_loop, daemon=True).start() - # Start monitoring immediately - self.after(100, self.tick) - # Wi-Fi analyzer auto-refresh - self.after(20000, self._wifi_auto_refresh) - - def _build_ui(self): - # --- Top bar: 
single compact row with essential info --- - top = ctk.CTkFrame(self) - top.pack(fill="x", padx=6, pady=(4, 2)) - - # Left side: key network info as compact labels - info = ctk.CTkFrame(top, fg_color="transparent") - info.pack(side="left", fill="x", expand=True) - for lbl, var in [("IP", self.localip), ("GW", self.gateway), ("DNS", self.dns_text)]: - ctk.CTkLabel(info, text=lbl, font=("Segoe UI", 9), text_color="#888").pack(side="left", padx=(6,1)) - ctk.CTkEntry(info, textvariable=var, width=100, height=24, font=("Segoe UI", 9)).pack(side="left", padx=(0,4)) - ctk.CTkLabel(info, text="Targets", font=("Segoe UI", 9), text_color="#888").pack(side="left", padx=(6,1)) - ctk.CTkEntry(info, textvariable=self.target1, width=70, height=24, font=("Segoe UI", 9)).pack(side="left", padx=(0,2)) - ctk.CTkEntry(info, textvariable=self.target2, width=70, height=24, font=("Segoe UI", 9)).pack(side="left", padx=(0,4)) - - # Right side: buttons always visible - btn_frame = ctk.CTkFrame(top, fg_color="transparent") - btn_frame.pack(side="right") - ctk.CTkButton(btn_frame, text="Settings", command=self.show_settings, width=70, height=26).pack(side="left", padx=2) - ctk.CTkButton(btn_frame, text="AI Export", command=self.ai_export, width=70, height=26).pack(side="left", padx=2) - ctk.CTkButton(btn_frame, text="Export", command=self.export_report, width=60, height=26).pack(side="left", padx=2) - - nb = ctk.CTkTabview(self) - nb.pack(fill="both", expand=True, padx=10, pady=(0, 10)) - - self.tab_overview = nb.add("Overview") - self.tab_incidents = nb.add("Incidents (combined)") - self.tab_events = nb.add("Events (config/meta)") - self.tab_diagnostics = nb.add("Diagnostics") - self.tab_wifi = nb.add("Wi-Fi Analyzer") - - self._build_overview() - self._build_incidents() - self._build_events() - self._build_diagnostics() - self._build_wifi_analyzer() - - self.pack(fill="both", expand=True) - - def _build_overview(self): - f = self.tab_overview - - # Use scrollable frame for the whole 
overview - scroll = ctk.CTkScrollableFrame(f) - scroll.pack(fill="both", expand=True, padx=5, pady=5) - - # --- Status bar --- - row = ctk.CTkFrame(scroll) - row.pack(fill="x", padx=5, pady=(5, 2)) - - self.status_label = ctk.CTkLabel(row, text="Status: (initializing)", - font=("Segoe UI", 14, "bold")) - self.status_label.pack(side="left") - - self.reason_label = ctk.CTkLabel(row, text="", font=("Segoe UI", 11)) - self.reason_label.pack(side="left", padx=15) - - # --- Dashboard cards (6 large metric cards) --- - dash_frame = ctk.CTkFrame(scroll) - dash_frame.pack(fill="x", padx=5, pady=(2, 4)) - - self.dash_values = {} - cards = [ - ("GW RTT", "gw_rtt", "#00BFFF"), - ("INET 1", "inet1_rtt", "#FFD700"), - ("INET 2", "inet2_rtt", "#FF6347"), - ("SIGNAL", "wifi_sig", "#00FF88"), - ("PKT LOSS", "pkt_loss", "#ccaa00"), - ("DNS", "dns_st", "#44cc44"), - ] - for i, (title, key, default_color) in enumerate(cards): - card = ctk.CTkFrame(dash_frame, fg_color="#1e1e1e", corner_radius=8) - card.grid(row=0, column=i, padx=4, pady=4, sticky="nsew") - dash_frame.columnconfigure(i, weight=1) - - ctk.CTkLabel(card, text=title, font=("Segoe UI", 9), - text_color="#888888").pack(pady=(6, 0)) - val_lbl = ctk.CTkLabel(card, text="--", font=("Segoe UI", 22, "bold"), - text_color=default_color) - val_lbl.pack(pady=(0, 6)) - self.dash_values[key] = val_lbl - - # --- Live chart (last 5 minutes) --- - chart_frame = ctk.CTkFrame(scroll) - chart_frame.pack(fill="x", padx=5, pady=(2, 4)) - ctk.CTkLabel(chart_frame, text="Live — Last 5 Minutes", - font=("Segoe UI", 10, "bold"), text_color="#888888").pack( - anchor="w", padx=10, pady=(4, 0)) - - self.live_chart_canvas = tk.Canvas(chart_frame, height=180, bg="#1a1a1a", - highlightthickness=0) - self.live_chart_canvas.pack(fill="x", padx=8, pady=(2, 6)) - self._live_chart_width = 0 - self.live_chart_canvas.bind("", - lambda e: setattr(self, '_live_chart_width', e.width)) - - # --- Compact info grid (secondary details) --- - info_frame = 
ctk.CTkFrame(scroll) - info_frame.pack(fill="x", padx=5, pady=(2, 4)) - - grid = ctk.CTkFrame(info_frame) - grid.pack(fill="x", padx=10, pady=6) - grid.columnconfigure(1, weight=1) - grid.columnconfigure(3, weight=1) - - self.kv = {} - # Two-column layout for compact display - left_fields = [ - ("Wi-Fi", "wifi"), ("SSID", "ssid"), ("BSSID", "bssid"), - ("Channel / Band", "channel_band"), ("Signal quality", "signal_quality"), - ] - right_fields = [ - ("Local IP", "local"), ("Iface", "iface"), ("Gateway", "gw"), - ("DNS servers", "dns_servers"), ("Suspicion", "suspicion"), - ("Root cause", "root_cause"), - ] - - for r, (label, key) in enumerate(left_fields): - ctk.CTkLabel(grid, text=label, font=("Segoe UI", 9), - text_color="#999999").grid(row=r, column=0, sticky="w", padx=(0, 6), pady=1) - v = ctk.CTkLabel(grid, text="—", font=("Segoe UI", 9)) - v.grid(row=r, column=1, sticky="w", padx=(0, 20), pady=1) - self.kv[key] = v - - for r, (label, key) in enumerate(right_fields): - ctk.CTkLabel(grid, text=label, font=("Segoe UI", 9), - text_color="#999999").grid(row=r, column=2, sticky="w", padx=(0, 6), pady=1) - v = ctk.CTkLabel(grid, text="—", font=("Segoe UI", 9)) - v.grid(row=r, column=3, sticky="w", pady=1) - self.kv[key] = v - - # Hidden KV entries still needed by refresh_overview but not displayed as primary - for key in ["signal", "gw_ping", "inet1", "inet2", "dns"]: - self.kv[key] = ctk.CTkLabel(grid, text="") # hidden, not gridded - - def _build_category_bar(self, parent, on_change): - bar = ctk.CTkFrame(parent) - bar.pack(fill="x", padx=10, pady=(10, 0)) - - ctk.CTkLabel(bar, text="Filter:").pack(side="left", padx=(0, 8)) - - # categories you use - cats = ["ALL", "LINK", "GATEWAY", "ISP", "DNS", "DEGRADED", "CONFIG", "SECURITY"] - - def set_cat(c): - self.filter_category.set(c) - on_change() - - for c in cats: - ctk.CTkButton(bar, text=c, width=70, command=lambda cc=c: set_cat(cc)).pack(side="left", padx=2) - - ctk.CTkLabel(bar, text="(Click a category to 
filter)").pack(side="left", padx=10) - - def _build_severity_bar(self, parent, on_change): - bar = ctk.CTkFrame(parent) - bar.pack(fill="x", padx=10, pady=(4, 0)) - - ctk.CTkLabel(bar, text="Severity:").pack(side="left", padx=(0, 8)) - - sevs = ["ALL", "WARN", "HIGH"] - - def set_sev(s): - self.filter_severity.set(s) - on_change() - - for s in sevs: - ctk.CTkButton(bar, text=s, width=60, command=lambda ss=s: set_sev(ss)).pack(side="left", padx=2) - - def _sort_tree(self, tree, col, reverse): - """Sort a Treeview by column on heading click.""" - data = [(tree.set(k, col), k) for k in tree.get_children("")] - try: - data.sort(key=lambda t: t[0], reverse=reverse) - except Exception: - pass - for idx, (_val, k) in enumerate(data): - tree.move(k, "", idx) - tree.heading(col, command=lambda: self._sort_tree(tree, col, not reverse)) - - def _build_incidents(self): - f = self.tab_incidents - - self._build_category_bar(f, self.refresh_incidents) - self._build_severity_bar(f, self.refresh_incidents) - - style = ttk.Style(self) - style.configure("Treeview", background="#2b2b2b", foreground="white", - fieldbackground="#2b2b2b", borderwidth=0) - style.configure("Treeview.Heading", background="#565b5e", foreground="white", - relief="flat", font=("Segoe UI", 9, "bold")) - style.map("Treeview", - background=[('selected', '#1f538d')], - foreground=[('selected', 'white')]) - - # Create main container for side-by-side layout - main_container = ctk.CTkFrame(f) - main_container.pack(fill="both", expand=True, padx=10, pady=10) - - # Left side - Incident tree - left_frame = ctk.CTkFrame(main_container) - left_frame.pack(side="left", fill="both", expand=True, padx=(0, 5)) - - cols = ("start", "end", "duration", "severity", "category", "cause") - tree_frame = tk.Frame(left_frame, bg="#2b2b2b") - tree_frame.pack(fill="both", expand=True, padx=5, pady=5) - self.inc_tree = ttk.Treeview(tree_frame, columns=cols, show="headings", height=20) - inc_scroll = ttk.Scrollbar(tree_frame, 
orient="vertical", command=self.inc_tree.yview) - self.inc_tree.configure(yscrollcommand=inc_scroll.set) - self.inc_tree.pack(side="left", fill="both", expand=True) - inc_scroll.pack(side="right", fill="y") - headings = { - "start": "START", - "end": "END", - "duration": "DURATION", - "severity": "SEVERITY", - "category": "CATEGORY", - "cause": "CAUSE (LATEST)", - } - widths = { - "start": 150, - "end": 150, - "duration": 80, - "severity": 80, - "category": 100, - "cause": 200, - } - for c in cols: - self.inc_tree.heading(c, text=headings[c], - command=lambda _c=c: self._sort_tree(self.inc_tree, _c, False)) - self.inc_tree.column(c, width=widths[c], stretch=(c == "cause")) - self.inc_tree.bind("<>", lambda _e: self.show_incident_details()) - - # Configure row tags for coloring - self.inc_tree.tag_configure("high", foreground="#ff4444") - self.inc_tree.tag_configure("warn", foreground="#ff8800") - self.inc_tree.tag_configure("info", foreground="#aaaaaa") - self.inc_tree.tag_configure("security", background="#441111") - self.inc_tree.tag_configure("open", background="#333333") - - # Right side - Incident details + graph - right_frame = ctk.CTkFrame(main_container) - right_frame.pack(side="right", fill="both", expand=True, padx=(5, 0)) - - ctk.CTkLabel(right_frame, text="Incident Details", font=("Segoe UI", 12, "bold")).pack(pady=(5, 2)) - - # Incident graph canvas - self.inc_graph_canvas = tk.Canvas(right_frame, height=140, bg="#1a1a1a", - highlightthickness=0) - self.inc_graph_canvas.pack(fill="x", padx=5, pady=(2, 4)) - - self.inc_details = ctk.CTkTextbox(right_frame, height=10, wrap="word") - self.inc_details.pack(fill="both", expand=True, padx=5, pady=(0, 5)) - self.inc_details.configure(state="disabled") - - # Bottom - Clear button - row = ctk.CTkFrame(f) - row.pack(fill="x", padx=10, pady=(0, 10)) - ctk.CTkButton(row, text="Clear incidents", command=self.clear_incidents).pack(side="left") - - def _build_events(self): - f = self.tab_events - - 
self._build_category_bar(f, self.refresh_events) - self._build_severity_bar(f, self.refresh_events) - - # Create main container for side-by-side layout (like Incidents) - main_container = ctk.CTkFrame(f) - main_container.pack(fill="both", expand=True, padx=10, pady=10) - - # Left side - Event tree with better columns - left_frame = ctk.CTkFrame(main_container) - left_frame.pack(side="left", fill="both", expand=True, padx=(0, 5)) - - cols = ("time", "severity", "category", "title") - tree_frame = tk.Frame(left_frame, bg="#2b2b2b") - tree_frame.pack(fill="both", expand=True, padx=5, pady=5) - self.event_tree = ttk.Treeview(tree_frame, columns=cols, show="headings", height=20) - evt_scroll = ttk.Scrollbar(tree_frame, orient="vertical", command=self.event_tree.yview) - self.event_tree.configure(yscrollcommand=evt_scroll.set) - self.event_tree.pack(side="left", fill="both", expand=True) - evt_scroll.pack(side="right", fill="y") - headings = { - "time": "TIME", - "severity": "SEVERITY", - "category": "CATEGORY", - "title": "EVENT", - } - widths = { - "time": 150, - "severity": 80, - "category": 100, - "title": 280, - } - for c in cols: - self.event_tree.heading(c, text=headings[c], - command=lambda _c=c: self._sort_tree(self.event_tree, _c, False)) - self.event_tree.column(c, width=widths[c], stretch=(c == "title")) - self.event_tree.bind("<>", lambda _e: self.show_event_details()) - - # Configure row tags for coloring - self.event_tree.tag_configure("high", foreground="#ff4444") - self.event_tree.tag_configure("warn", foreground="#ff8800") - self.event_tree.tag_configure("info", foreground="#aaaaaa") - self.event_tree.tag_configure("security", background="#441111") - - # Right side - Event details with more space - right_frame = ctk.CTkFrame(main_container) - right_frame.pack(side="right", fill="both", expand=True, padx=(5, 0)) - - ctk.CTkLabel(right_frame, text="Event Details", font=("Segoe UI", 12, "bold")).pack(pady=(5, 5)) - self.event_details = 
ctk.CTkTextbox(right_frame, height=15, wrap="word") - self.event_details.pack(fill="both", expand=True, padx=5, pady=5) - self.event_details.configure(state="disabled") - - # Bottom - Clear button - row = ctk.CTkFrame(f) - row.pack(fill="x", padx=10, pady=(0, 10)) - ctk.CTkButton(row, text="Clear events", command=self.clear_events).pack(side="left") - - def _build_diagnostics(self): - f = self.tab_diagnostics - - # Scrollable container - outer = ctk.CTkScrollableFrame(f) - outer.pack(fill="both", expand=True, padx=10, pady=10) - - # --- Wi-Fi panel --- - wifi_panel = ctk.CTkFrame(outer) - wifi_panel.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(wifi_panel, text="Wi-Fi Info", font=("Segoe UI", 13, "bold")).pack(anchor="w", padx=10, pady=(6, 2)) - self.diag_wifi_grid = ctk.CTkFrame(wifi_panel) - self.diag_wifi_grid.pack(fill="x", padx=10, pady=(0, 8)) - self.diag_wifi_labels: Dict[str, ctk.CTkLabel] = {} - wifi_fields = ["state", "ssid", "bssid", "signal", "channel", "radio"] - for idx, key in enumerate(wifi_fields): - ctk.CTkLabel(self.diag_wifi_grid, text=key.upper(), font=("Segoe UI", 10, "bold")).grid(row=0, column=idx, padx=6, pady=2, sticky="w") - lbl = ctk.CTkLabel(self.diag_wifi_grid, text="--", font=("Segoe UI", 10)) - lbl.grid(row=1, column=idx, padx=6, pady=2, sticky="w") - self.diag_wifi_labels[key] = lbl - - # --- Ping results panel --- - ping_panel = ctk.CTkFrame(outer) - ping_panel.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(ping_panel, text="Ping Results", font=("Segoe UI", 13, "bold")).pack(anchor="w", padx=10, pady=(6, 2)) - self.diag_ping_grid = ctk.CTkFrame(ping_panel) - self.diag_ping_grid.pack(fill="x", padx=10, pady=(0, 8)) - self.diag_ping_labels: Dict[str, ctk.CTkLabel] = {} - ping_cols = ["Gateway", "Target 1", "Target 2"] - for idx, name in enumerate(ping_cols): - ctk.CTkLabel(self.diag_ping_grid, text=name, font=("Segoe UI", 10, "bold")).grid(row=0, column=idx, padx=14, pady=2, sticky="w") - lbl = ctk.CTkLabel(self.diag_ping_grid, text="--", 
font=("Segoe UI", 10)) - lbl.grid(row=1, column=idx, padx=14, pady=2, sticky="w") - self.diag_ping_labels[name] = lbl - - # --- Rolling stats panel --- - roll_panel = ctk.CTkFrame(outer) - roll_panel.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(roll_panel, text="Rolling Stats (last 60s)", font=("Segoe UI", 13, "bold")).pack(anchor="w", padx=10, pady=(6, 2)) - self.diag_roll_grid = ctk.CTkFrame(roll_panel) - self.diag_roll_grid.pack(fill="x", padx=10, pady=(0, 8)) - self.diag_roll_bars: Dict[str, Tuple[tk.Canvas, ctk.CTkLabel]] = {} - roll_items = [("GW Loss", "gw"), ("Inet1 Loss", "inet1"), ("Inet2 Loss", "inet2")] - for idx, (label, key) in enumerate(roll_items): - ctk.CTkLabel(self.diag_roll_grid, text=label, font=("Segoe UI", 10, "bold")).grid(row=idx, column=0, padx=6, pady=2, sticky="w") - canvas = tk.Canvas(self.diag_roll_grid, width=160, height=16, bg="#2b2b2b", highlightthickness=0) - canvas.grid(row=idx, column=1, padx=6, pady=2) - val_lbl = ctk.CTkLabel(self.diag_roll_grid, text="0%", font=("Segoe UI", 10)) - val_lbl.grid(row=idx, column=2, padx=6, pady=2, sticky="w") - self.diag_roll_bars[key] = (canvas, val_lbl) - # Max RTT labels - self.diag_rtt_labels: Dict[str, ctk.CTkLabel] = {} - rtt_items = [("GW max RTT", "gw_rtt"), ("Inet1 max RTT", "inet1_rtt"), ("Inet2 max RTT", "inet2_rtt")] - for idx, (label, key) in enumerate(rtt_items): - ctk.CTkLabel(self.diag_roll_grid, text=label, font=("Segoe UI", 10, "bold")).grid(row=idx, column=3, padx=12, pady=2, sticky="w") - lbl = ctk.CTkLabel(self.diag_roll_grid, text="--", font=("Segoe UI", 10)) - lbl.grid(row=idx, column=4, padx=6, pady=2, sticky="w") - self.diag_rtt_labels[key] = lbl - - # --- DNS panel --- - dns_panel = ctk.CTkFrame(outer) - dns_panel.pack(fill="x", pady=(0, 8)) - ctk.CTkLabel(dns_panel, text="DNS", font=("Segoe UI", 13, "bold")).pack(anchor="w", padx=10, pady=(6, 2)) - self.diag_dns_frame = ctk.CTkFrame(dns_panel) - self.diag_dns_frame.pack(fill="x", padx=10, pady=(0, 8)) - 
self.diag_dns_status = ctk.CTkLabel(self.diag_dns_frame, text="--", font=("Segoe UI", 10)) - self.diag_dns_status.pack(anchor="w", padx=6, pady=2) - self.diag_dns_summary = ctk.CTkLabel(self.diag_dns_frame, text="", font=("Segoe UI", 9), wraplength=0, justify="left") - self.diag_dns_summary.pack(anchor="w", padx=6, pady=2) - - # --------------------- - # Wi-Fi Analyzer Tab - # --------------------- - - def _build_wifi_analyzer(self): - f = self.tab_wifi - scroll = ctk.CTkScrollableFrame(f) - scroll.pack(fill="both", expand=True, padx=10, pady=10) - - # Header - top = ctk.CTkFrame(scroll, fg_color="transparent") - top.pack(fill="x", pady=(0, 4)) - ctk.CTkLabel(top, text="Wi-Fi Channel Analyzer", - font=("Segoe UI", 14, "bold")).pack(side="left") - self._wifi_scan_status = ctk.CTkLabel(top, text="", font=("Segoe UI", 9), - text_color="#888888") - self._wifi_scan_status.pack(side="left", padx=10) - ctk.CTkButton(top, text="Scan Now", width=80, height=26, - command=self._request_wifi_scan).pack(side="right") - - # Recommendation banner - self._wifi_rec_frame = ctk.CTkFrame(scroll, fg_color="#1a2a1a", corner_radius=8) - self._wifi_rec_frame.pack(fill="x", pady=(0, 6)) - self._wifi_rec_lbl = ctk.CTkLabel(self._wifi_rec_frame, text="Click 'Scan Now' to analyze channels", - font=("Segoe UI", 12, "bold"), text_color="#888888") - self._wifi_rec_lbl.pack(padx=12, pady=8) - - # Channel scores row - self._wifi_scores_frame = ctk.CTkFrame(scroll, fg_color="transparent") - self._wifi_scores_frame.pack(fill="x", pady=(0, 6)) - self._wifi_score_labels: Dict[int, ctk.CTkLabel] = {} - for ch in [1, 6, 11]: - card = ctk.CTkFrame(self._wifi_scores_frame, fg_color="#1e1e1e", corner_radius=8) - card.pack(side="left", fill="x", expand=True, padx=3) - ctk.CTkLabel(card, text=f"Channel {ch}", font=("Segoe UI", 9), - text_color="#888888").pack(pady=(6, 0)) - lbl = ctk.CTkLabel(card, text="--", font=("Segoe UI", 18, "bold"), - text_color="#888888") - lbl.pack(pady=(0, 6)) - 
self._wifi_score_labels[ch] = lbl - - # Channel map (canvas) - map_frame = ctk.CTkFrame(scroll, fg_color="#1e1e1e", corner_radius=8) - map_frame.pack(fill="x", pady=(0, 6)) - ctk.CTkLabel(map_frame, text="Channel Map — 2.4 GHz", - font=("Segoe UI", 10, "bold"), - text_color="#888888").pack(anchor="w", padx=10, pady=(6, 0)) - self._wifi_canvas = tk.Canvas(map_frame, height=220, bg="#1a1a1a", highlightthickness=0) - self._wifi_canvas.pack(fill="x", padx=8, pady=(2, 8)) - self._wifi_canvas_width = 700 - self._wifi_canvas.bind("", lambda e: setattr(self, '_wifi_canvas_width', e.width)) - - # Networks table - table_frame = ctk.CTkFrame(scroll, fg_color="#1e1e1e", corner_radius=8) - table_frame.pack(fill="x", pady=(0, 6)) - ctk.CTkLabel(table_frame, text="Visible Networks", - font=("Segoe UI", 10, "bold"), - text_color="#888888").pack(anchor="w", padx=10, pady=(6, 2)) - - tree_frame = ctk.CTkFrame(table_frame, fg_color="transparent") - tree_frame.pack(fill="x", padx=8, pady=(0, 8)) - - cols = ("ssid", "bssid", "channel", "signal", "radio", "auth", "interference") - headers = ("SSID", "BSSID", "Channel", "Signal", "Radio", "Security", "Interference") - widths = (160, 130, 60, 60, 80, 100, 90) - - self._wifi_tree = ttk.Treeview(tree_frame, columns=cols, show="headings", height=8) - for col, hdr, w in zip(cols, headers, widths): - self._wifi_tree.heading(col, text=hdr) - self._wifi_tree.column(col, width=w, stretch=(col == "ssid")) - self._wifi_tree.tag_configure("mine", foreground="#00FF88") - self._wifi_tree.tag_configure("overlap", foreground="#FFA500") - self._wifi_tree.tag_configure("clean", foreground="#cccccc") - self._wifi_tree.tag_configure("stale", foreground="#555555") - self._wifi_tree.pack(fill="x") - - # Store scan data — accumulate across scans for consistency - self._wifi_networks: List[Dict] = [] - self._wifi_scan_history: List[Tuple[float, List[Dict]]] = [] - self._wifi_accumulated: Dict[str, Dict] = {} # bssid -> {network_data, last_seen} - - # Auto-scan 
on tab switch - self.after(1500, self._request_wifi_scan) - - def _request_wifi_scan(self): - self._wifi_scan_status.configure(text="Scanning...") - # Run in thread to avoid blocking - def do_scan(): - networks = scan_wifi_networks() - my_ssid = self.ssid.get() if hasattr(self, 'ssid') else "" - if not my_ssid: - # Get from current WiFi info - wifi = netsh_wlan_info() - my_ssid = wifi.get("ssid", "") - rec = recommend_channel(networks, my_ssid) - self.after(0, lambda: self._on_wifi_scan(networks, rec, my_ssid)) - threading.Thread(target=do_scan, daemon=True).start() - - def _on_wifi_scan(self, networks: List[Dict], rec: Dict, my_ssid: str): - now = time.time() - - # Accumulate: update seen networks, keep those seen in last 90 seconds - for net in networks: - bssid = net.get("bssid", "") - if bssid: - self._wifi_accumulated[bssid] = {**net, "_last_seen": now, "_stale": False} - - # Mark stale networks (not seen in this scan but seen recently) - current_bssids = {n.get("bssid", "") for n in networks} - stale_cutoff = now - 90 # 90 seconds - to_remove = [] - for bssid, data in self._wifi_accumulated.items(): - if bssid not in current_bssids: - if data["_last_seen"] < stale_cutoff: - to_remove.append(bssid) - else: - data["_stale"] = True - for bssid in to_remove: - del self._wifi_accumulated[bssid] - - # Build merged network list from accumulated data - merged = list(self._wifi_accumulated.values()) - self._wifi_networks = merged - - self._wifi_scan_history.append((now, networks)) - if len(self._wifi_scan_history) > 20: - self._wifi_scan_history = self._wifi_scan_history[-20:] - - # Recalculate recommendation using merged (accumulated) networks - rec = recommend_channel(merged, my_ssid) - - fresh_count = sum(1 for n in merged if not n.get("_stale")) - stale_count = sum(1 for n in merged if n.get("_stale")) - now_str = datetime.now().strftime("%H:%M:%S") - status = f"Last scan: {now_str} — {fresh_count} active" - if stale_count: - status += f", {stale_count} recent" - 
self._wifi_scan_status.configure(text=status) - - # Update recommendation - best = rec.get("best", 0) - current = rec.get("current", 0) - scores = rec.get("scores", {}) - reason = rec.get("reason", "") - - if current == best and current > 0: - self._wifi_rec_frame.configure(fg_color="#112211") - self._wifi_rec_lbl.configure( - text=f"✓ You're on Channel {current} — the best available channel!", - text_color="#00FF88") - elif best: - self._wifi_rec_frame.configure(fg_color="#2a1a00") - self._wifi_rec_lbl.configure( - text=f"⚡ Recommendation: Switch to Channel {best} | {reason}", - text_color="#FFD700") - - # Update score cards - for ch, lbl in self._wifi_score_labels.items(): - score = scores.get(ch, 0) - if score >= 80: - color = "#00FF88" - elif score >= 50: - color = "#FFD700" - else: - color = "#FF4444" - lbl.configure(text=f"{score:.0f}/100", text_color=color) - # Highlight best channel - parent = lbl.master - if ch == best: - parent.configure(border_width=2, border_color=color) - else: - parent.configure(border_width=0) - - # Draw channel map - self._draw_channel_map(networks, my_ssid, current) - - # Update networks table using merged (accumulated) data - self._wifi_tree.delete(*self._wifi_tree.get_children()) - merged = list(self._wifi_accumulated.values()) - sorted_nets = sorted(merged, key=lambda n: ( - 0 if n.get("ssid") == my_ssid else 1, - n.get("_stale", False), # stale networks last - -n.get("signal_pct", 0) - )) - - for net in sorted_nets: - ssid = net.get("ssid", "?") - is_mine = ssid == my_ssid - is_stale = net.get("_stale", False) - ch = net.get("channel", 0) - - # Calculate interference level - if is_mine: - interference = "—" - tag = "mine" - elif is_stale: - interference = "?" 
- tag = "stale" - else: - # Check if overlaps with my channel - if current and abs(ch - current) < 5: - overlap = (5 - abs(ch - current)) / 5.0 - interference = f"{'High' if overlap > 0.6 else 'Medium' if overlap > 0.3 else 'Low'}" - tag = "overlap" - else: - interference = "None" - tag = "clean" - - self._wifi_tree.insert("", "end", values=( - ssid, net.get("bssid", ""), - ch, f"{net.get('signal_pct', 0)}%", - net.get("radio", ""), net.get("auth", ""), - interference - ), tags=(tag,)) - - def _draw_channel_map(self, networks: List[Dict], my_ssid: str, my_channel: int): - """Draw visual channel map on canvas.""" - canvas = self._wifi_canvas - canvas.delete("all") - w = self._wifi_canvas_width - h = 220 - - if w < 100: - return - - pad_l, pad_r, pad_t, pad_b = 40, 20, 25, 30 - cw = w - pad_l - pad_r - ch_h = h - pad_t - pad_b - - # Background grid - for pct in (0, 25, 50, 75, 100): - y = pad_t + ch_h - (pct / 100.0) * ch_h - canvas.create_line(pad_l, y, w - pad_r, y, fill="#2a2a2a", dash=(2, 4)) - canvas.create_text(pad_l - 4, y, text=f"{pct}%", anchor="e", - fill="#555555", font=("Segoe UI", 7)) - - # Channel labels (1-13) - num_channels = 13 - ch_width = cw / num_channels - for ch_num in range(1, num_channels + 1): - x = pad_l + (ch_num - 0.5) * ch_width - color = "#888888" if ch_num in (1, 6, 11) else "#444444" - font_weight = "bold" if ch_num in (1, 6, 11) else "" - canvas.create_text(x, h - 10, text=str(ch_num), fill=color, - font=("Segoe UI", 8, font_weight)) - - # Draw overlap zones first (semi-transparent wider bars) - for net in networks: - ch = net.get("channel", 0) - sig = net.get("signal_pct", 0) - ssid = net.get("ssid", "") - if ch < 1 or ch > 13: - continue - - is_mine = ssid == my_ssid - - # Overlap zone: channels ch-2 to ch+2 (2.4GHz 22MHz width) - for overlap_ch in range(max(1, ch - 2), min(14, ch + 3)): - if overlap_ch == ch: - continue - x = pad_l + (overlap_ch - 1) * ch_width + 2 - bar_w = ch_width - 4 - # Height proportional to signal, but 
dimmer for overlap - bar_h = (sig / 100.0) * ch_h * 0.3 - y = pad_t + ch_h - bar_h - color = "#1a3a1a" if is_mine else "#3a2a1a" - canvas.create_rectangle(x, y, x + bar_w, pad_t + ch_h, - fill=color, outline="") - - # Draw main bars - for net in networks: - ch = net.get("channel", 0) - sig = net.get("signal_pct", 0) - ssid = net.get("ssid", "") - if ch < 1 or ch > 13: - continue - - is_mine = ssid == my_ssid - x = pad_l + (ch - 1) * ch_width + 2 - bar_w = ch_width - 4 - bar_h = (sig / 100.0) * ch_h - y = pad_t + ch_h - bar_h - - if is_mine: - fill = "#00CC66" - outline = "#00FF88" - elif my_channel and abs(ch - my_channel) < 5: - fill = "#CC6600" - outline = "#FF8800" - else: - fill = "#4466AA" - outline = "#6688CC" - - canvas.create_rectangle(x, y, x + bar_w, pad_t + ch_h, - fill=fill, outline=outline, width=1) - - # SSID label on bar - label = ssid[:12] if ssid else "?" - label_y = y - 8 if y > pad_t + 15 else y + 12 - canvas.create_text(x + bar_w / 2, label_y, text=label, - fill="#ffffff" if is_mine else "#cccccc", - font=("Segoe UI", 7), anchor="n" if label_y == y - 8 else "s") - - # Signal % inside bar - if bar_h > 20: - canvas.create_text(x + bar_w / 2, y + bar_h / 2, - text=f"{sig}%", fill="#ffffff", - font=("Segoe UI", 8, "bold")) - - # Legend - lx = pad_l + 8 - items = [("Your network", "#00CC66"), ("Overlapping", "#CC6600"), ("Non-overlapping", "#4466AA")] - for i, (label, color) in enumerate(items): - ly = pad_t + 2 + i * 14 - canvas.create_rectangle(lx, ly, lx + 10, ly + 8, fill=color, outline="") - canvas.create_text(lx + 14, ly + 4, text=label, anchor="w", - fill="#999999", font=("Segoe UI", 7)) - - def _wifi_auto_refresh(self): - """Auto-refresh Wi-Fi scan every 15 seconds when the tab is visible.""" - if not self.running: - return - try: - if self.nb.get() == "Wi-Fi Analyzer": - self._request_wifi_scan() - except Exception: - pass - self.after(15000, self._wifi_auto_refresh) - - # --------------------- - # Actions - # --------------------- - - def 
force_stop(self): - self.running = False - try: - self.parent.destroy() - except Exception: - pass - - def set_baseline(self): - gw = self.gateway.get().strip() - dns = [x.strip() for x in self.dns_text.get().split(",") if x.strip()] - local_ip = self.localip.get().strip() - if not gw: - messagebox.showerror("Baseline", "Gateway is empty.") - return - if not dns: - dns = get_dns_servers() - self.engine.set_baseline(gw, dns, local_ip) - messagebox.showinfo("Baseline", "Baseline saved (gateway + DNS + local IP).") - - def show_settings(self): - """Show settings popup window""" - settings_window = ctk.CTkToplevel(self.parent) - settings_window.title("Network Monitor Settings") - settings_window.geometry("520x600") - settings_window.minsize(400, 350) - settings_window.transient(self.parent) - settings_window.grab_set() - - # Scrollable main container - main_frame = ctk.CTkScrollableFrame(settings_window) - main_frame.pack(fill="both", expand=True, padx=8, pady=(4, 0)) - - # --- Monitoring --- - ctk.CTkLabel(main_frame, text="Monitoring", font=ctk.CTkFont(size=12, weight="bold")).pack(anchor="w", pady=(4, 2)) - mon_grid = ctk.CTkFrame(main_frame) - mon_grid.pack(fill="x", padx=4, pady=2) - ctk.CTkLabel(mon_grid, text="Interval (ms):").grid(row=0, column=0, sticky="w", padx=(4,4), pady=2) - ttk.Spinbox(mon_grid, from_=800, to=30000, increment=200, textvariable=self.interval_ms, width=8).grid(row=0, column=1, sticky="w", pady=2) - ctk.CTkLabel(mon_grid, text="Ping timeout (ms):").grid(row=0, column=2, sticky="w", padx=(16,4), pady=2) - ttk.Spinbox(mon_grid, from_=300, to=5000, increment=100, textvariable=self.ping_timeout_ms, width=8).grid(row=0, column=3, sticky="w", pady=2) - - # --- Network --- - ctk.CTkLabel(main_frame, text="Network", font=ctk.CTkFont(size=12, weight="bold")).pack(anchor="w", pady=(8, 2)) - net_grid = ctk.CTkFrame(main_frame) - net_grid.pack(fill="x", padx=4, pady=2) - net_grid.columnconfigure(1, weight=1) - fields = [ - (0, "Gateway:", 
self.gateway), - (1, "DNS servers:", self.dns_text), - (2, "DNS domain:", self.dns_domain), - ] - for r, lbl, var in fields: - ctk.CTkLabel(net_grid, text=lbl).grid(row=r, column=0, sticky="w", padx=(4,4), pady=2) - ctk.CTkEntry(net_grid, textvariable=var).grid(row=r, column=1, sticky="ew", padx=(0,4), pady=2) - # Ping targets on same grid - ctk.CTkLabel(net_grid, text="Ping targets:").grid(row=3, column=0, sticky="w", padx=(4,4), pady=2) - tgt_frame = ctk.CTkFrame(net_grid, fg_color="transparent") - tgt_frame.grid(row=3, column=1, sticky="w", pady=2) - ctk.CTkEntry(tgt_frame, textvariable=self.target1, width=90).pack(side="left", padx=(0,4)) - ctk.CTkEntry(tgt_frame, textvariable=self.target2, width=90).pack(side="left") - - # --- Sensitivity --- - ctk.CTkLabel(main_frame, text="Sensitivity", font=ctk.CTkFont(size=12, weight="bold")).pack(anchor="w", pady=(8, 2)) - preset_row = ctk.CTkFrame(main_frame, fg_color="transparent") - preset_row.pack(fill="x", padx=4, pady=2) - presets = [("Strict", 80, 150), ("Normal", 120, 250), ("Relaxed", 200, 400), ("Wi-Fi tolerant", 300, 600)] - def apply_preset(elev, high): - self.thresh_elevated.set(elev) - self.thresh_high.set(high) - for name, elev, high in presets: - ctk.CTkButton(preset_row, text=name, width=90, height=26, - command=lambda e=elev, h=high: apply_preset(e, h)).pack(side="left", padx=2) - thresh_row = ctk.CTkFrame(main_frame, fg_color="transparent") - thresh_row.pack(fill="x", padx=4, pady=4) - ctk.CTkLabel(thresh_row, text="Elevated (ms):").pack(side="left", padx=(0, 2)) - ttk.Spinbox(thresh_row, from_=20, to=1000, increment=10, textvariable=self.thresh_elevated, width=5).pack(side="left", padx=(0, 12)) - ctk.CTkLabel(thresh_row, text="High (ms):").pack(side="left", padx=(0, 2)) - ttk.Spinbox(thresh_row, from_=50, to=2000, increment=10, textvariable=self.thresh_high, width=5).pack(side="left") - - # --- Auto Export --- - ctk.CTkLabel(main_frame, text="Auto Export", font=ctk.CTkFont(size=12, 
weight="bold")).pack(anchor="w", pady=(8, 2)) - ctk.CTkCheckBox(main_frame, text="Enable auto export", variable=self.auto_export_enabled).pack(anchor="w", padx=4, pady=2) - exp_row = ctk.CTkFrame(main_frame, fg_color="transparent") - exp_row.pack(fill="x", padx=4, pady=2) - ctk.CTkLabel(exp_row, text="Time:").pack(side="left", padx=(0, 4)) - ttk.Spinbox(exp_row, from_=0, to=23, increment=1, textvariable=self.auto_export_hour, width=3, wrap=True).pack(side="left") - ctk.CTkLabel(exp_row, text=":").pack(side="left") - ttk.Spinbox(exp_row, from_=0, to=59, increment=1, textvariable=self.auto_export_minute, width=3, wrap=True).pack(side="left", padx=(0,6)) - for label, h, m in [("06:00", 6, 0), ("12:00", 12, 0), ("18:00", 18, 0), ("23:30", 23, 30)]: - ctk.CTkButton(exp_row, text=label, width=48, height=24, - command=lambda hh=h, mm=m: (self.auto_export_hour.set(hh), self.auto_export_minute.set(mm))).pack(side="left", padx=2) - folder_row = ctk.CTkFrame(main_frame, fg_color="transparent") - folder_row.pack(fill="x", padx=4, pady=2) - ctk.CTkLabel(folder_row, text="Folder:").pack(side="left", padx=(0, 4)) - ctk.CTkEntry(folder_row, textvariable=self.export_folder).pack(side="left", fill="x", expand=True) - - # --- Action buttons (outside scrollable, always visible at bottom) --- - button_frame = ctk.CTkFrame(settings_window) - button_frame.pack(fill="x", padx=8, pady=8) - ctk.CTkButton(button_frame, text="Save & Close", width=100, command=lambda: self.save_settings(settings_window)).pack(side="left", padx=4) - ctk.CTkButton(button_frame, text="Cancel", width=70, command=settings_window.destroy).pack(side="left", padx=4) - ctk.CTkButton(button_frame, text="Set Baseline", width=90, command=self.set_baseline).pack(side="left", padx=4) - - def save_settings(self, window): - """Save settings and close window""" - window.destroy() - - def clear_events(self): - self.engine.events.clear() - self.refresh_events() - - def clear_incidents(self): - self.engine.incidents.clear() - 
self.engine.active_incidents.clear() - self.refresh_incidents() - - def ai_export(self): - """Quick AI-friendly export""" - try: - # Check if we have any data - if not self.engine.samples: - messagebox.showwarning("AI Export", "No data to export yet. Please wait for monitoring to collect some samples.") - return - - path = filedialog.asksaveasfilename( - defaultextension=".json", - filetypes=[("AI export", "*.json")], - title="Save AI-friendly lightweight export" - ) - if not path: - return - - ai_export = self.engine.generate_ai_export() - - if not ai_export: - messagebox.showwarning("AI Export", "No data available for export.") - return - - with open(path, "w", encoding="utf-8") as f: - json.dump(ai_export, f, indent=2, ensure_ascii=False) - - size_kb = len(str(ai_export))/1024 - messagebox.showinfo("AI Export", f"AI-friendly export saved:\n{path}\n\nSize: {size_kb:.1f} KB\n\nPerfect for ChatGPT analysis!") - except Exception as e: - messagebox.showerror("Export failed", f"AI Export failed:\n{str(e)}") - - def export_report(self): - # Ask user what type of export they want - choice = messagebox.askyesnocancel( - "Export Type", - "Choose export type:\n\nYES = Full detailed report\nNO = AI-friendly lightweight export\nCANCEL = Abort", - icon='question' - ) - - if choice is None: # CANCEL - return - - if choice: # YES - Full report - path = filedialog.asksaveasfilename( - defaultextension=".json", - filetypes=[("JSON report", "*.json")], - title="Save full network stability report" - ) - if not path: - return - - report = { - "generated_at": now_ts(), - "settings": { - "interval_ms": int(self.interval_ms.get()), - "ping_timeout_ms": int(self.ping_timeout_ms.get()), - "target1": self.target1.get().strip(), - "target2": self.target2.get().strip(), - "dns_domain": self.dns_domain.get().strip(), - }, - "baseline": { - "gateway": self.engine.baseline_gateway, - "dns_servers": self.engine.baseline_dns, - }, - "last_sample": asdict(self._last_sample) if self._last_sample 
else None, - "incidents": [asdict(i) for i in self.engine.incidents.snapshot()], - "events": [asdict(e) for e in self.engine.events.snapshot()], - "samples_tail": [asdict(s) for s in self.engine.samples.snapshot()[-500:]], - } - - try: - with open(path, "w", encoding="utf-8") as f: - json.dump(report, f, indent=2, ensure_ascii=False) - messagebox.showinfo("Export", f"Full report saved:\n{path}") - except Exception as e: - messagebox.showerror("Export failed", str(e)) - - else: # NO - AI-friendly export - path = filedialog.asksaveasfilename( - defaultextension=".json", - filetypes=[("AI export", "*.json")], - title="Save AI-friendly lightweight export" - ) - if not path: - return - - try: - ai_export = self.engine.generate_ai_export() - with open(path, "w", encoding="utf-8") as f: - json.dump(ai_export, f, indent=2, ensure_ascii=False) - messagebox.showinfo("Export", f"AI-friendly export saved:\n{path}\n\nSize: {len(str(ai_export))/1024:.1f} KB") - except Exception as e: - messagebox.showerror("Export failed", str(e)) - - def tick(self): - if not self.running: - return - - # Snapshot StringVar values on the main thread before passing to worker - sample_params = { - "gateway": self.gateway.get().strip(), - "target1": self.target1.get().strip() or "8.8.8.8", - "target2": self.target2.get().strip() or "1.1.1.1", - "dns_domain": self.dns_domain.get().strip() or "google.com", - "ping_timeout_ms": int(self.ping_timeout_ms.get()), - } - - try: - self.work_q.put_nowait(("sample", sample_params)) - except Exception: - pass - - # Sync latency thresholds to engine - self.engine.thresh_elevated = self.thresh_elevated.get() - self.engine.thresh_high = self.thresh_high.get() - - # Sync export hour/minute to engine and check for auto export - self.engine.export_hour = self.auto_export_hour.get() - self.engine.export_minute = self.auto_export_minute.get() - self.engine.auto_export_check(self.auto_export_enabled, self.auto_export_hour, self.export_folder) - - # Reduce refresh 
frequency to improve performance - self.refresh_overview() - # Only refresh incidents and events every 5th tick (further reduced) - self._refresh_counter += 1 - - if self._refresh_counter % 5 == 0: - self.refresh_incidents() - self.refresh_events() - if self._refresh_counter % 10 == 0: - self.refresh_diagnostics() - - # Dynamic interval: faster polling during active incidents - base_ms = int(self.interval_ms.get()) - status = self.engine.last_status - has_active = bool(self.engine.active_incidents) - - if status == "DOWN" or (has_active and any( - self.engine._sev_rank(self.engine._find_incident(iid).severity) >= 3 - for iid in self.engine.active_incidents.values() - if self.engine._find_incident(iid))): - # Critical/DOWN: poll every 1.5s for precise timing - interval = max(1500, base_ms // 3) - elif status == "DEGRADED" or has_active: - # Active incident: poll every 2s - interval = max(2000, base_ms // 2) - else: - # Stable: use configured interval (default 3s) - interval = max(1500, base_ms) - - self.after(interval, self.tick) - - def _worker_loop(self): - while self.running: - try: - job = self.work_q.get(timeout=0.2) - except queue.Empty: - continue - - if isinstance(job, tuple) and job[0] == "sample": - try: - self._do_sample(job[1]) - except Exception as e: - self.engine.log_event("WARN", "DEGRADED", "Sampling error", {"error": str(e)}) - - def _do_sample(self, params: Dict[str, Any]): - local_ip, ifname = get_default_route_interface_ip() - gw = get_default_gateway() or params["gateway"] - - dns = get_dns_servers() - dns_server_for_test = dns[0] if dns else (gw if gw else None) - - wifi = netsh_wlan_info() - wifi_state = wifi.get("state", "") - wifi_signal = wifi.get("signal", "") - wifi_ssid = wifi.get("ssid", "") - wifi_bssid = wifi.get("bssid", "") - wifi_channel = wifi.get("channel", "") - wifi_radio = wifi.get("radio", "") - - # Parse signal % as integer for tracking - wifi_signal_pct = -1 - if wifi_signal: - m = re.search(r"(\d+)", wifi_signal) - if m: 
- wifi_signal_pct = int(m.group(1)) - - # Feed signal and BSSID to engine for tracking - self.engine.track_signal(wifi_signal_pct) - self.engine.detect_bssid_change(wifi_bssid, wifi_ssid) - - timeout_ms = params["ping_timeout_ms"] - t1 = params["target1"] - t2 = params["target2"] - - # Run ping calls concurrently - with concurrent.futures.ThreadPoolExecutor(max_workers=3) as pool: - gw_future = pool.submit(ping_once, gw, timeout_ms) if gw else None - inet_future = pool.submit(ping_once, t1, timeout_ms) - inet2_future = pool.submit(ping_once, t2, timeout_ms) - - gw_ok, gw_rtt, gw_raw = gw_future.result() if gw_future else (True, None, "") - inet_ok, inet_rtt, inet_raw = inet_future.result() - inet2_ok, inet2_rtt, inet2_raw = inet2_future.result() - - self.engine._roll_add("gw", gw_ok, gw_rtt) - self.engine._roll_add("inet1", inet_ok, inet_rtt) - self.engine._roll_add("inet2", inet2_ok, inet2_rtt) - - domain = params["dns_domain"] - dns_state, dns_raw = nslookup(domain, dns_server_for_test, timeout_s=4) - - status, reason, sev, cat = self.engine.classify( - local_ip, wifi, gw, - gw_ok, gw_rtt, - inet_ok, inet_rtt, - inet2_ok, inet2_rtt, - dns_state - ) - - # Combined incident handling (problem + recovery) - self.engine.on_state_update( - status=status, - category=cat, - severity=sev, - reason=reason, - details={ - "local_ip": local_ip, - "iface": ifname, - "gateway": gw, - "dns_state": dns_state, - "wifi_signal": wifi_signal, - "wifi_signal_pct": wifi_signal_pct, - "wifi_bssid": wifi_bssid, - "wifi_channel": wifi_channel, - "wifi_radio": wifi_radio, - }, - ) - - self.engine.last_status = status - self.engine.last_reason = reason - - self.engine.detect_config_changes(gw, dns) - self.engine.detect_flapping(status) - - # Add periodic diagnostic events to show tool is working - current_time = time.time() - if current_time - self.engine._last_diagnostic_event >= 300: # Every 5 minutes - self.engine._last_diagnostic_event = current_time - sample_count = 
len(self.engine.samples) - incident_count = len(self.engine.incidents) - event_count = len(self.engine.events) - - # Log a diagnostic event - self.engine.log_event( - "INFO", - "DIAGNOSTIC", - "Monitoring Status Update", - { - "samples_collected": sample_count, - "active_incidents": len([i for i in self.engine.incidents.snapshot() if not i.end_time]), - "total_incidents": incident_count, - "total_events": event_count, - "current_status": status, - "uptime_minutes": int((current_time - self.engine._start_time) / 60) - } - ) - - s = Sample( - timestamp=now_ts(), - local_ip=local_ip, - iface=ifname, - gateway_ip=gw, - dns_servers=dns, - wifi_state=wifi_state, - wifi_signal=wifi_signal, - wifi_ssid=wifi_ssid, - gw_ok=gw_ok, - gw_rtt=gw_rtt, - inet_ok=inet_ok, - inet_rtt=inet_rtt, - inet2_ok=inet2_ok, - inet2_rtt=inet2_rtt, - dns_state=dns_state, - dns_raw_hint=short(dns_raw, 260), - status=status, - reason=reason, - wifi_bssid=wifi_bssid, - wifi_channel=wifi_channel, - wifi_radio=wifi_radio, - wifi_signal_pct=wifi_signal_pct, - ) - - # Enhance with intelligence analysis - if self.engine.intelligence: - try: - s = self.engine.enhance_sample_with_intelligence(s) - except Exception as e: - # If intelligence fails, continue with basic analysis - print(f"Intelligence analysis failed: {e}") - self.engine.add_sample(s) - self._last_sample = s - - self._last_diag = { - "wifi": json.dumps(wifi, indent=2, ensure_ascii=False) if wifi else "(netsh wlan info unavailable)", - "ping_gateway": short(gw_raw, 400) if gw else "(no gateway)", - "ping_target1": short(inet_raw, 400), - "ping_target2": short(inet2_raw, 400), - "dns": short(dns_raw, 500), - "roll": json.dumps({ - "loss_gw_last60s": round(self.engine._roll_loss("gw") * 100, 1), - "loss_inet1_last60s": round(self.engine._roll_loss("inet1") * 100, 1), - "loss_inet2_last60s": round(self.engine._roll_loss("inet2") * 100, 1), - "max_rtt_gw_last60s": self.engine._roll_max_rtt("gw"), - "max_rtt_inet1_last60s": 
self.engine._roll_max_rtt("inet1"), - "max_rtt_inet2_last60s": self.engine._roll_max_rtt("inet2"), - }, indent=2) - } - - if local_ip: - self.localip.set(local_ip) - if ifname: - self.iface.set(ifname) - if gw: - self.gateway.set(gw) - if dns: - self.dns_text.set(", ".join(dns)) - - # --------------------- - # UI refresh - # --------------------- - - def refresh_overview(self): - s = self._last_sample - if not s: - self.status_label.configure(text="Status: (initializing)") - return - - self.status_label.configure(text=f"Status: {s.status}") - self.reason_label.configure(text=s.reason) - - self.kv["wifi"].configure(text=s.wifi_state or "(unknown)") - self.kv["signal"].configure(text=s.wifi_signal or "—") - self.kv["ssid"].configure(text=s.wifi_ssid or "—") - self.kv["bssid"].configure(text=s.wifi_bssid or "—") - - # Channel + band display - ch_text = s.wifi_channel or "—" - if s.wifi_radio: - ch_text += f" ({s.wifi_radio})" - if s.wifi_channel: - try: - ch_num = int(s.wifi_channel) - band = "2.4 GHz" if ch_num <= 14 else "5 GHz" - ch_text = f"Ch {ch_num} — {band}" - if s.wifi_radio: - ch_text += f" ({s.wifi_radio})" - except ValueError: - pass - self.kv["channel_band"].configure(text=ch_text) - - # Signal quality assessment - sig_q = "—" - if s.wifi_signal_pct >= 0: - pct = s.wifi_signal_pct - avg = self.engine.get_signal_avg(60) - mn = self.engine.get_signal_min(60) - if pct >= 80: - sig_q = f"Excellent ({pct}%)" - elif pct >= 60: - sig_q = f"Good ({pct}%)" - elif pct >= 40: - sig_q = f"Fair ({pct}%)" - elif pct >= 20: - sig_q = f"Weak ({pct}%) — may cause issues" - else: - sig_q = f"Very weak ({pct}%) — likely causing problems" - if avg is not None and mn is not None: - sig_q += f" [avg {avg:.0f}%, min {mn}% last 60s]" - self.kv["signal_quality"].configure(text=sig_q) - - self.kv["gw_ping"].configure(text=("OK" if s.gw_ok else "FAIL") + (f" ({s.gw_rtt:.0f} ms)" if s.gw_rtt is not None else "")) - self.kv["inet1"].configure(text=("OK" if s.inet_ok else "FAIL") + 
(f" ({s.inet_rtt:.0f} ms)" if s.inet_rtt is not None else "")) - self.kv["inet2"].configure(text=("OK" if s.inet2_ok else "FAIL") + (f" ({s.inet2_rtt:.0f} ms)" if s.inet2_rtt is not None else "")) - - self.kv["dns"].configure(text=s.dns_state) - self.kv["local"].configure(text=s.local_ip or "—") - self.kv["iface"].configure(text=s.iface or "—") - self.kv["gw"].configure(text=s.gateway_ip or "—") - self.kv["dns_servers"].configure(text=", ".join(s.dns_servers) if s.dns_servers else "—") - - # Show intelligence information - self.kv["suspicion"].configure(text=s.suspicion_level) - if s.root_cause: - max_prob = max([s.root_cause.router_issue, s.root_cause.isp_issue, s.root_cause.dns_issue, - s.root_cause.local_adapter_issue, s.root_cause.possible_malicious_activity]) - if max_prob > 0.3: - if s.root_cause.router_issue == max_prob: - cause_text = f"Router ({max_prob:.2f})" - elif s.root_cause.isp_issue == max_prob: - cause_text = f"ISP ({max_prob:.2f})" - elif s.root_cause.dns_issue == max_prob: - cause_text = f"DNS ({max_prob:.2f})" - elif s.root_cause.local_adapter_issue == max_prob: - cause_text = f"Adapter ({max_prob:.2f})" - elif s.root_cause.possible_malicious_activity == max_prob: - cause_text = f"Suspicious ({max_prob:.2f})" - else: - cause_text = "Unknown" - self.kv["root_cause"].configure(text=cause_text) - else: - self.kv["root_cause"].configure(text="Stable") - - # --- Dashboard cards --- - # GW RTT - if s.gw_ok and s.gw_rtt is not None: - self.dash_values["gw_rtt"].configure( - text=f"{s.gw_rtt:.0f} ms", text_color=self._rtt_color(s.gw_rtt)) - elif not s.gw_ok: - self.dash_values["gw_rtt"].configure(text="FAIL", text_color="#cc4444") - else: - self.dash_values["gw_rtt"].configure(text="--", text_color="#888888") - - # Inet 1 - if s.inet_ok and s.inet_rtt is not None: - self.dash_values["inet1_rtt"].configure( - text=f"{s.inet_rtt:.0f} ms", text_color=self._rtt_color(s.inet_rtt)) - elif not s.inet_ok: - self.dash_values["inet1_rtt"].configure(text="FAIL", 
text_color="#cc4444") - else: - self.dash_values["inet1_rtt"].configure(text="--", text_color="#888888") - - # Inet 2 - if s.inet2_ok and s.inet2_rtt is not None: - self.dash_values["inet2_rtt"].configure( - text=f"{s.inet2_rtt:.0f} ms", text_color=self._rtt_color(s.inet2_rtt)) - elif not s.inet2_ok: - self.dash_values["inet2_rtt"].configure(text="FAIL", text_color="#cc4444") - else: - self.dash_values["inet2_rtt"].configure(text="--", text_color="#888888") - - # Wi-Fi signal - if s.wifi_signal_pct >= 0: - self.dash_values["wifi_sig"].configure( - text=f"{s.wifi_signal_pct}%", - text_color=self._signal_color(s.wifi_signal_pct)) - else: - self.dash_values["wifi_sig"].configure(text="N/A", text_color="#888888") - - # Packet loss (max of all targets) - loss_pct = max(self.engine._roll_loss("gw"), - self.engine._roll_loss("inet1"), - self.engine._roll_loss("inet2")) * 100 - self.dash_values["pkt_loss"].configure( - text=f"{loss_pct:.1f}%", - text_color=self._loss_bar_color(loss_pct)) - - # DNS - self.dash_values["dns_st"].configure( - text=s.dns_state, text_color=self._dns_color(s.dns_state)) - - # --- Live chart --- - self._update_live_chart() - - def refresh_incidents(self): - for i in self.inc_tree.get_children(): - self.inc_tree.delete(i) - - fc = self.filter_category.get() - fs = self.filter_severity.get() - incidents = list(self.engine.incidents.snapshot()) - - # newest first - incidents.reverse() - - for inc in incidents[:1200]: - if fc != "ALL" and inc.category != fc: - continue - if fs != "ALL" and inc.severity != fs: - continue - - end = inc.end_time if inc.end_time else "(open)" - dur = inc.duration if inc.duration else "" - - tags = [] - sev_lower = inc.severity.upper() - if sev_lower == "HIGH": - tags.append("high") - elif sev_lower == "WARN": - tags.append("warn") - else: - tags.append("info") - if inc.category == "SECURITY": - tags.append("security") - if not inc.end_time: - tags.append("open") - - self.inc_tree.insert( - "", - "end", - 
iid=f"inc-{inc.id}", - values=(inc.start_time, end, dur, inc.severity, inc.category, inc.cause), - tags=tags, - ) - - def show_incident_details(self): - sel = self.inc_tree.selection() - if not sel: - return - iid = sel[0] - try: - inc_id = int(iid.split("-")[1]) - except Exception: - return - - inc = None - for x in self.engine.incidents.snapshot(): - if x.id == inc_id: - inc = x - break - if not inc: - return - - # Draw incident graph - self._draw_incident_graph(inc) - - # Create human-friendly explanation - friendly_explanation = self._get_friendly_explanation(inc) - - text = json.dumps(asdict(inc), indent=2, ensure_ascii=False) - friendly_text = f"{friendly_explanation}\n\n--- Technical Details ---\n{text}" - - self.inc_details.configure(state="normal") - self.inc_details.delete("1.0", "end") - self.inc_details.insert("1.0", friendly_text) - self.inc_details.configure(state="disabled") - - def _get_friendly_explanation(self, inc) -> str: - """Generate human-friendly explanation for non-technical users""" - category = inc.category.lower() - severity = inc.severity.lower() - cause = inc.cause.lower() if inc.cause else "" - - explanations = { - "link": { - "title": "🔌 Connection Problem (Your PC/Router)", - "what_happened": "Your computer lost connection to the router or Wi-Fi network.", - "who_fixes": "You or your family can fix this:", - "steps": [ - "✓ Check if Wi-Fi is working on your phone", - "✓ Restart your router (unplug for 30 seconds, plug back in)", - "✓ Check if network cable is connected properly", - "✓ Move closer to the router if using Wi-Fi" - ], - "not_hacker": "This is NOT a hacker - just a connection issue" - }, - "gateway": { - "title": "🏠 Router Problem (Your Network Equipment)", - "what_happened": "Your router is not responding properly to connections.", - "who_fixes": "You can usually fix this:", - "steps": [ - "✓ Restart the router (unplug for 30 seconds)", - "✓ Check if router lights are normal", - "✓ Too many devices connected? 
Disconnect some", - "✓ Router might be overheating - give it space" - ], - "not_hacker": "This is NOT hacking - router needs restart" - }, - "isp": { - "title": "🌐 Internet Provider Problem (External)", - "what_happened": "Your internet service provider is having issues.", - "who_fixes": "Only your internet provider can fix this:", - "steps": [ - "✓ Check if neighbors have same issue", - "✓ Call your internet provider's support number", - "✓ Check provider's website for outage reports", - "✓ Wait - provider is already working on it" - ], - "not_hacker": "This is NOT hacking - provider maintenance/outage" - }, - "dns": { - "title": "🔍 Address Lookup Problem (Internet Directory)", - "what_happened": "Your computer can't translate website names to addresses.", - "who_fixes": "Usually fixes itself, but you can try:", - "steps": [ - "✓ Wait 5-10 minutes (often fixes itself)", - "✓ Restart your computer", - "✓ Change DNS to Google (8.8.8.8) in router settings", - "✓ Restart router if problem continues" - ], - "not_hacker": "This is NOT hacking - just internet directory issues" - }, - "degraded": { - "title": "⚠ Slow Internet (Performance Issue)", - "what_happened": "Internet is working but much slower than normal.", - "who_fixes": "You can try these fixes:", - "steps": [ - "✓ Restart router", - "✓ Close unnecessary programs/tabs", - "✓ Check if someone is downloading/streaming heavily", - "✓ Try connecting with cable instead of Wi-Fi" - ], - "not_hacker": "This is NOT hacking - just temporary slowness" - }, - "security": { - "title": "🔴 Security Alert (Possible Attack)", - "what_happened": "A suspicious change was detected on your network.", - "who_fixes": "Investigate immediately:", - "steps": [ - "✓ Check the BSSID — did your router's MAC address change?", - "✓ If you have only ONE router, a BSSID change is suspicious", - "✓ Someone may have set up a fake Wi-Fi access point (evil twin)", - "✓ Disconnect from Wi-Fi and use mobile data until verified" - ], - 
"not_hacker": "This COULD be an attack — investigate before dismissing!" - } - } - - # Get the appropriate explanation - base_explanation = explanations.get(category, explanations["degraded"]) - - # Add severity context - severity_info = "" - if severity == "high": - severity_info = "\n🚨 This is a serious problem - internet barely works!" - elif severity == "warn": - severity_info = "\n⚠️ This is annoying but internet still works partially." - else: - severity_info = "\n✅ This is a minor issue or just informational." - - # Add Wi-Fi signal context from incident details - signal_info = "" - details = inc.details or {} - sig_pct = details.get("wifi_signal_pct", -1) - if isinstance(sig_pct, int) and sig_pct >= 0: - if sig_pct < 30: - signal_info = f"\n📶 Wi-Fi signal was VERY WEAK ({sig_pct}%) when this started — likely the cause!" - elif sig_pct < 50: - signal_info = f"\n📶 Wi-Fi signal was weak ({sig_pct}%) — probably contributing to the problem." - elif sig_pct < 70: - signal_info = f"\n📶 Wi-Fi signal was fair ({sig_pct}%) — signal alone unlikely to be the cause." - else: - signal_info = f"\n📶 Wi-Fi signal was strong ({sig_pct}%) — signal is NOT the problem." 
- - wifi_detail = "" - ch = details.get("wifi_channel", "") - radio = details.get("wifi_radio", "") - bssid = details.get("wifi_bssid", "") - if ch or radio or bssid: - parts = [] - if ch: - try: - ch_num = int(ch) - band = "2.4 GHz" if ch_num <= 14 else "5 GHz" - parts.append(f"Channel {ch} ({band})") - except ValueError: - parts.append(f"Channel {ch}") - if radio: - parts.append(radio) - if bssid: - parts.append(f"BSSID {bssid}") - wifi_detail = "\n🔗 " + " | ".join(parts) - - # Build friendly explanation - friendly = f""" -{base_explanation['title']} - -What happened: -{base_explanation['what_happened']} - -{severity_info} -{signal_info} -{wifi_detail} - -{base_explanation['who_fixes']} -{chr(10).join(base_explanation['steps'])} - -{base_explanation['not_hacker']} - -Time started: {inc.start_time} -Duration: {inc.duration or 'Still ongoing'} -""" - - return friendly - - def refresh_events(self): - for i in self.event_tree.get_children(): - self.event_tree.delete(i) - - fc = self.filter_category.get() - fs = self.filter_severity.get() - events = list(self.engine.events.snapshot()) - events.reverse() - - for idx, e in enumerate(events[:1200]): - if fc != "ALL" and e.category != fc: - continue - if fs != "ALL" and e.severity != fs: - continue - tags = [] - if e.severity == "HIGH": - tags.append("high") - elif e.severity == "WARN": - tags.append("warn") - else: - tags.append("info") - if e.category == "SECURITY": - tags.append("security") - self.event_tree.insert("", "end", iid=f"e-{idx}", values=(e.timestamp, e.severity, e.category, e.title), tags=tags) - - def show_event_details(self): - sel = self.event_tree.selection() - if not sel: - return - iid = sel[0] - idx = int(iid.split("-")[1]) - recent = list(reversed(self.engine.events.snapshot()[-1600:])) - if idx < 0 or idx >= len(recent): - return - e = recent[idx] - text = json.dumps(asdict(e), indent=2, ensure_ascii=False) - self.event_details.configure(state="normal") - self.event_details.delete("1.0", "end") - 
self.event_details.insert("1.0", text) - self.event_details.configure(state="disabled") - - def _loss_bar_color(self, pct: float) -> str: - if pct < 5: - return "#44cc44" - if pct < 25: - return "#ccaa00" - return "#cc4444" - - def _signal_color(self, pct: int) -> str: - if pct >= 70: - return "#44cc44" - if pct >= 40: - return "#ccaa00" - return "#cc4444" - - def _rtt_color(self, rtt_ms: Optional[float], ok: bool = True) -> str: - if not ok or rtt_ms is None: - return "#cc4444" - if rtt_ms < 60: - return "#44cc44" - if rtt_ms < 200: - return "#ccaa00" - return "#cc4444" - - def _dns_color(self, state: str) -> str: - if state == "OK": - return "#44cc44" - if state == "SLOW": - return "#ccaa00" - return "#cc4444" - - # ---- Reusable Canvas line chart ---- - - def _draw_line_chart(self, canvas, series_list, width, height, show_legend=True): - """Draw a multi-series line chart on a tkinter Canvas. - - series_list: list of dicts with keys: - label (str), color (str), points (list of (float_ts, float_val)), - axis ("left" or "right") - """ - canvas.delete("all") - if width < 80 or height < 40: - return - - ml, mr, mt, mb = 50, 50, 18, 22 # margins - dw = width - ml - mr - dh = height - mt - mb - if dw < 20 or dh < 20: - return - - # Collect all timestamps for X range - all_ts = [] - for s in series_list: - for t, _ in s["points"]: - all_ts.append(t) - if not all_ts: - canvas.create_text(width // 2, height // 2, text="No data yet", - fill="#666666", font=("Segoe UI", 10)) - return - - t_min, t_max = min(all_ts), max(all_ts) - if t_max - t_min < 1: - t_max = t_min + 1 - - # Compute Y ranges per axis - def y_range(axis): - vals = [v for s in series_list if s.get("axis", "left") == axis - for _, v in s["points"] if v is not None] - if not vals: - return 0, 100 - lo, hi = 0, max(vals) * 1.15 - if hi < 10: - hi = 10 - return lo, hi - - left_lo, left_hi = y_range("left") - right_lo, right_hi = y_range("right") - - def map_x(t): - return ml + (t - t_min) / (t_max - t_min) * dw 
- - def map_y(v, axis="left"): - lo, hi = (left_lo, left_hi) if axis == "left" else (right_lo, right_hi) - if hi == lo: - return mt + dh // 2 - return mt + (1 - (v - lo) / (hi - lo)) * dh - - # Grid lines (horizontal) - for i in range(5): - y = mt + i * dh // 4 - canvas.create_line(ml, y, ml + dw, y, fill="#333333", dash=(2, 4)) - # Left axis labels - val = left_hi - i * (left_hi - left_lo) / 4 - canvas.create_text(ml - 4, y, text=f"{val:.0f}", anchor="e", - fill="#888888", font=("Segoe UI", 7)) - # Right axis labels - val_r = right_hi - i * (right_hi - right_lo) / 4 - canvas.create_text(ml + dw + 4, y, text=f"{val_r:.0f}", anchor="w", - fill="#888888", font=("Segoe UI", 7)) - - # Axis unit labels - canvas.create_text(ml - 4, mt - 8, text="ms", anchor="e", - fill="#888888", font=("Segoe UI", 7)) - canvas.create_text(ml + dw + 4, mt - 8, text="%", anchor="w", - fill="#888888", font=("Segoe UI", 7)) - - # X-axis time labels (~5 labels) - span = t_max - t_min - step = max(1, span / 5) - t_cur = t_min - while t_cur <= t_max: - x = map_x(t_cur) - try: - lbl = datetime.fromtimestamp(t_cur).strftime("%H:%M:%S") - except Exception: - lbl = "" - canvas.create_text(x, mt + dh + 12, text=lbl, - fill="#888888", font=("Segoe UI", 7)) - canvas.create_line(x, mt, x, mt + dh, fill="#2a2a2a", dash=(1, 6)) - t_cur += step - - # Draw series - for s in series_list: - pts = s["points"] - axis = s.get("axis", "left") - color = s["color"] - coords = [] - for t, v in pts: - if v is None: - # Break the line at None values - if len(coords) >= 4: - canvas.create_line(*coords, fill=color, width=2, smooth=False) - coords = [] - continue - coords.extend([map_x(t), map_y(v, axis)]) - if len(coords) >= 4: - canvas.create_line(*coords, fill=color, width=2, smooth=False) - - # Legend - if show_legend: - lx = ml + 6 - ly = mt + 4 - for s in series_list: - canvas.create_rectangle(lx, ly, lx + 10, ly + 8, fill=s["color"], outline="") - canvas.create_text(lx + 14, ly + 4, text=s["label"], anchor="w", - 
fill="#cccccc", font=("Segoe UI", 7)) - lx += len(s["label"]) * 6 + 28 - - def _samples_to_ts(self, samples): - """Convert sample timestamps to float timestamps once.""" - result = [] - for s in samples: - dt = parse_ts(s.timestamp) - if dt: - result.append((dt.timestamp(), s)) - return result - - def _update_live_chart(self): - """Redraw the live scrolling chart with last 5 minutes of data.""" - canvas = self.live_chart_canvas - w = canvas.winfo_width() - h = canvas.winfo_height() - if w < 100 or h < 40: - return - - cutoff = time.time() - 300 - recent = self._samples_to_ts(self.engine.samples.snapshot()) - recent = [(t, s) for t, s in recent if t >= cutoff] - - series = [ - {"label": "GW RTT", "color": "#00BFFF", "axis": "left", - "points": [(t, s.gw_rtt) for t, s in recent]}, - {"label": "Inet 1", "color": "#FFD700", "axis": "left", - "points": [(t, s.inet_rtt) for t, s in recent]}, - {"label": "Inet 2", "color": "#FF6347", "axis": "left", - "points": [(t, s.inet2_rtt) for t, s in recent]}, - {"label": "Signal %", "color": "#00FF88", "axis": "right", - "points": [(t, s.wifi_signal_pct if s.wifi_signal_pct >= 0 else None) - for t, s in recent]}, - ] - self._draw_line_chart(canvas, series, w, h) - - def _draw_incident_graph(self, inc): - """Draw a graph of metrics during an incident's lifetime.""" - canvas = self.inc_graph_canvas - w = canvas.winfo_width() - h = canvas.winfo_height() - if w < 80 or h < 30: - canvas.delete("all") - return - - start_dt = parse_ts(inc.start_time) - end_dt = parse_ts(inc.end_time) if inc.end_time else datetime.now() - if not start_dt: - canvas.delete("all") - canvas.create_text(w // 2, h // 2, text="No timestamp", - fill="#666666", font=("Segoe UI", 9)) - return - - # Add 30s buffer on each side - start_f = start_dt.timestamp() - 30 - end_f = end_dt.timestamp() + 30 - - all_ts = self._samples_to_ts(self.engine.samples.snapshot()) - incident_data = [(t, s) for t, s in all_ts if start_f <= t <= end_f] - - if not incident_data: - 
canvas.delete("all") - canvas.create_text(w // 2, h // 2, - text="No sample data for this incident\n(data may have been pruned)", - fill="#666666", font=("Segoe UI", 9), justify="center") - return - - series = [ - {"label": "GW RTT", "color": "#00BFFF", "axis": "left", - "points": [(t, s.gw_rtt) for t, s in incident_data]}, - {"label": "Inet 1", "color": "#FFD700", "axis": "left", - "points": [(t, s.inet_rtt) for t, s in incident_data]}, - {"label": "Inet 2", "color": "#FF6347", "axis": "left", - "points": [(t, s.inet2_rtt) for t, s in incident_data]}, - ] - self._draw_line_chart(canvas, series, w, h, show_legend=True) - - # Draw incident start/end markers - if incident_data: - t_min_d = min(t for t, _ in incident_data) - t_max_d = max(t for t, _ in incident_data) - span = t_max_d - t_min_d - if span < 1: - span = 1 - ml, mr, mt_m, mb_m = 50, 50, 18, 22 - dw = w - ml - mr - - def mx(t): - return ml + (t - t_min_d) / span * dw - - # Start marker (red dashed) - sx = mx(start_dt.timestamp()) - if ml <= sx <= ml + dw: - canvas.create_line(sx, mt_m, sx, h - mb_m, fill="#ff4444", dash=(4, 3), width=1) - canvas.create_text(sx, h - mb_m + 8, text="START", fill="#ff4444", - font=("Segoe UI", 6)) - - # End marker (green dashed) - if inc.end_time: - ex = mx(end_dt.timestamp()) - if ml <= ex <= ml + dw: - canvas.create_line(ex, mt_m, ex, h - mb_m, fill="#44cc44", dash=(4, 3), width=1) - canvas.create_text(ex, h - mb_m + 8, text="END", fill="#44cc44", - font=("Segoe UI", 6)) - - def refresh_diagnostics(self): - s = self._last_sample - d = self._last_diag - if not d and not s: - return - - # --- Wi-Fi panel --- - if s: - wifi_data = { - "state": s.wifi_state or "--", - "ssid": s.wifi_ssid or "--", - "bssid": s.wifi_bssid or "--", - "signal": s.wifi_signal or "--", - "channel": s.wifi_channel or "--", - "radio": s.wifi_radio or "--", - } - for key, val in wifi_data.items(): - lbl = self.diag_wifi_labels.get(key) - if lbl: - lbl.configure(text=val) - if key == "signal" and 
s.wifi_signal_pct >= 0: - lbl.configure(text_color=self._signal_color(s.wifi_signal_pct)) - elif key == "state": - clr = "#44cc44" if "connected" in val.lower() else "#cc4444" - lbl.configure(text_color=clr) - - # --- Ping panel --- - if s: - def ping_text(ok, rtt): - st = "OK" if ok else "FAIL" - rt = f" ({rtt:.0f} ms)" if rtt is not None else "" - return st + rt - def ping_color(ok): - return "#44cc44" if ok else "#cc4444" - mapping = [ - ("Gateway", s.gw_ok, s.gw_rtt), - ("Target 1", s.inet_ok, s.inet_rtt), - ("Target 2", s.inet2_ok, s.inet2_rtt), - ] - for name, ok, rtt in mapping: - lbl = self.diag_ping_labels.get(name) - if lbl: - lbl.configure(text=ping_text(ok, rtt), text_color=ping_color(ok)) - - # --- Rolling stats bars --- - loss_map = { - "gw": self.engine._roll_loss("gw") * 100, - "inet1": self.engine._roll_loss("inet1") * 100, - "inet2": self.engine._roll_loss("inet2") * 100, - } - for key, pct in loss_map.items(): - canvas, val_lbl = self.diag_roll_bars[key] - canvas.delete("all") - bar_w = min(int(pct / 100 * 160), 160) - color = self._loss_bar_color(pct) - if bar_w > 0: - canvas.create_rectangle(0, 0, bar_w, 16, fill=color, outline="") - val_lbl.configure(text=f"{pct:.1f}%", text_color=color) - - rtt_map = { - "gw_rtt": self.engine._roll_max_rtt("gw"), - "inet1_rtt": self.engine._roll_max_rtt("inet1"), - "inet2_rtt": self.engine._roll_max_rtt("inet2"), - } - for key, val in rtt_map.items(): - lbl = self.diag_rtt_labels.get(key) - if lbl: - lbl.configure(text=f"{val:.0f} ms" if val is not None else "--") - - # --- DNS panel --- - if s: - dns_st = s.dns_state - dns_clr = "#44cc44" if dns_st == "OK" else ("#ccaa00" if dns_st == "SLOW" else "#cc4444") - self.diag_dns_status.configure(text=f"DNS: {dns_st}", text_color=dns_clr) - hint = s.dns_raw_hint or "" - self.diag_dns_summary.configure(text=hint[:300]) - -# Toolbox entrypoint -# ========================= - -def run_tool(): - try: - if tk._default_root is None: - if HAS_CTK: - root = 
ctk.CTkToplevel() - else: - root = tk.Toplevel() - app = App(root) - root.protocol("WM_DELETE_WINDOW", app.force_stop) - root.mainloop() - else: - if HAS_CTK: - win = ctk.CTkToplevel() - else: - win = tk.Toplevel() - app = App(win) - win.protocol("WM_DELETE_WINDOW", app.force_stop) - except Exception as e: - messagebox.showerror("Network Stability Monitor Pro", f"Startup error:\n{e}") - - -if __name__ == "__main__": - run_tool() diff --git a/tools/_common/__init__.py b/tools/_common/__init__.py index e69de29..a81da3d 100644 --- a/tools/_common/__init__.py +++ b/tools/_common/__init__.py @@ -0,0 +1,51 @@ +"""Shared helpers used by toolbox tools and the launcher. + +Submodules +---------- +logging Configured rotating-file loggers + ``get_log_dir()``. +threadsafe ``BoundedDeque`` and ``SnapshotDict`` primitives. +paths Repo-layout path constants (``REPO_ROOT`` etc.). +subprocess Hidden-window subprocess helpers (``run_hidden`` etc.). +ui_theme Shared ttk dark-theme styling. +exceptions Narrow-except decorator + context manager. +config Environment + ``.env`` configuration lookups. + +Commonly-used names are re-exported at package level so callers can +write ``from tools._common import TOOLS_DIR, run_hidden`` without +reaching into the submodule layout. Submodule imports remain valid +and are preferred when a caller only needs one or two names from a +single module. 
+""" + +from tools._common.config import get_bool, get_config, get_path +from tools._common.exceptions import narrow_excepts, suppress_and_log +from tools._common.paths import ( + AUDITS_DIR, + COMMON_DIR, + PLANS_DIR, + PORTABLE_DIR, + REPO_ROOT, + TESTS_DIR, + TOOLS_DIR, +) +from tools._common.subprocess import CREATE_NO_WINDOW, popen_hidden, run_hidden +from tools._common.ui_theme import apply_dark_treeview_style + +__all__ = [ + "AUDITS_DIR", + "COMMON_DIR", + "CREATE_NO_WINDOW", + "PLANS_DIR", + "PORTABLE_DIR", + "REPO_ROOT", + "TESTS_DIR", + "TOOLS_DIR", + "apply_dark_treeview_style", + "get_bool", + "get_config", + "get_path", + "narrow_excepts", + "popen_hidden", + "run_hidden", + "suppress_and_log", +] diff --git a/tools/_common/config.py b/tools/_common/config.py new file mode 100644 index 0000000..bad6b83 --- /dev/null +++ b/tools/_common/config.py @@ -0,0 +1,141 @@ +"""Environment + ``.env`` configuration lookups. + +Tools in the toolbox have historically stored per-user or per-environment +values (paths, schedules, feature flags) as module-level literals. The +``CLAUDE.md`` project rules require these to move to environment +variables with optional ``.env`` fallback, and this module provides the +single read path. + +Usage +----- + from tools._common.config import get_config, get_bool, get_path + + output_dir = get_path("AUTOMATIONS_FFMPEG_OUTPUT_DIR", + default=Path.home() / "Videos") + auto_export = get_bool("AUTOMATIONS_NSM_AUTO_EXPORT", default=True) + token = get_config("AUTOMATIONS_VT_API_KEY", default=None) + +Precedence (highest wins) +------------------------- +1. ``os.environ`` at call time. +2. Values loaded from ``/.env`` via :mod:`python-dotenv` (optional + dependency). ``load_dotenv`` is called once, lazily, on the first + :func:`get_config` call. If ``python-dotenv`` is not installed, the + ``.env`` file is ignored and only real environment variables apply. +3. The ``default`` argument. 
+ +Only real environment variables override an already-set key, so shell +``export AUTOMATIONS_FOO=x`` always wins over ``AUTOMATIONS_FOO=y`` in +``.env``. +""" + +from __future__ import annotations + +import logging +import os +from pathlib import Path +from typing import Optional, Union + +from tools._common.paths import REPO_ROOT + +_log = logging.getLogger(__name__) + +_DOTENV_LOADED = False +_TRUTHY = {"1", "true", "yes", "on", "y", "t"} +_FALSY = {"0", "false", "no", "off", "n", "f", ""} + + +def _load_dotenv_once() -> None: + """Load ``/.env`` via python-dotenv exactly once per process. + + No-op if ``python-dotenv`` is not installed or the file is missing. + Real environment variables take precedence over ``.env`` entries. + """ + global _DOTENV_LOADED + if _DOTENV_LOADED: + return + _DOTENV_LOADED = True + + env_path = REPO_ROOT / ".env" + if not env_path.is_file(): + return + + try: + from dotenv import load_dotenv # type: ignore[import-not-found] + except ImportError: + _log.debug( + "found %s but python-dotenv is not installed; ignoring", env_path + ) + return + + load_dotenv(env_path, override=False) + + +def get_config(key: str, default: Optional[str] = None) -> Optional[str]: + """Return the string value for ``key`` from env or ``.env``. + + Args: + key: Environment variable name. Convention: ``AUTOMATIONS__``. + default: Returned when neither ``os.environ`` nor ``.env`` provide + a value. ``None`` means "no default; caller handles missing". + + Returns: + The resolved string, or ``default`` when unset. + """ + _load_dotenv_once() + value = os.environ.get(key) + if value is not None: + return value + return default + + +def get_bool(key: str, default: bool = False) -> bool: + """Return a boolean value for ``key`` from env or ``.env``. + + Accepted truthy tokens (case-insensitive): ``1 true yes on y t``. + Accepted falsy tokens: ``0 false no off n f`` plus empty string. + Unknown values fall back to ``default`` and emit a WARNING. 
+ + Args: + key: Environment variable name. + default: Value returned when unset or unparseable. + + Returns: + Parsed boolean. + """ + raw = get_config(key) + if raw is None: + return default + token = raw.strip().lower() + if token in _TRUTHY: + return True + if token in _FALSY: + return False + _log.warning( + "config %s=%r is not a recognized bool; using default %r", + key, raw, default, + ) + return default + + +def get_path(key: str, default: Union[str, Path]) -> Path: + """Return a :class:`~pathlib.Path` value for ``key``. + + ``~`` is expanded. The path is NOT required to exist; callers that + need directory creation should use ``mkdir(parents=True, exist_ok=True)`` + themselves. + + Args: + key: Environment variable name. + default: Fallback path when unset. ``str`` or ``Path`` accepted. + + Returns: + Resolved ``Path`` (user-expanded, not absolutized). + """ + raw = get_config(key) + if raw is None: + return Path(default).expanduser() if isinstance(default, str) else default + return Path(raw).expanduser() + + +__all__ = ["get_bool", "get_config", "get_path"] diff --git a/tools/_common/exceptions.py b/tools/_common/exceptions.py new file mode 100644 index 0000000..aa7fb41 --- /dev/null +++ b/tools/_common/exceptions.py @@ -0,0 +1,106 @@ +"""Narrow-except helpers used by the Tier B broad-except sweep. + +The toolbox has ~300 ``except Exception:`` sites that the audit wants +replaced with an explicit allow-list. Each site has its own local +behaviour (return ``None``, log, retry, etc.), so a one-size decorator +cannot replace them all. This module provides two thin primitives that +cover the common cases cleanly: + +* :func:`narrow_excepts` — decorator: wrap a callable so only listed + exception types are caught (optionally logged) and a default value + returned. Non-listed exceptions propagate unchanged. +* :func:`suppress_and_log` — context manager: suppress listed exceptions + inside a ``with`` block and emit a WARNING with the exception repr. 
+ +Use the decorator when a whole helper function should degrade gracefully +on specific errors; use the context manager for a few lines inside a +larger function. +""" + +from __future__ import annotations + +import functools +import logging +from contextlib import contextmanager +from typing import Any, Callable, Iterator, Optional, Type, TypeVar + +_F = TypeVar("_F", bound=Callable[..., Any]) + + +def narrow_excepts( + *types: Type[BaseException], + default: Any = None, + logger: Optional[logging.Logger] = None, +) -> Callable[[_F], _F]: + """Decorator: catch only the listed exception types. + + A surgical replacement for a top-level ``except Exception`` inside a + function. Exceptions NOT in ``types`` propagate unchanged. + + Args: + *types: Exception classes to catch. At least one required. + default: Value returned when a listed exception is caught. + logger: Optional logger used to emit a WARNING with the exception + class and message. ``None`` means silent suppression — prefer + passing a logger in production code. + + Returns: + A decorator that wraps a callable with the narrow try/except. + + Raises: + ValueError: If no exception types are passed. + """ + if not types: + raise ValueError("narrow_excepts requires at least one exception type") + + def decorator(fn: _F) -> _F: + @functools.wraps(fn) + def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return fn(*args, **kwargs) + except types as exc: + if logger is not None: + logger.warning( + "%s swallowed %s: %s", + fn.__qualname__, + type(exc).__name__, + exc, + ) + return default + + return wrapper # type: ignore[return-value] + + return decorator + + +@contextmanager +def suppress_and_log( + logger: logging.Logger, + *types: Type[BaseException], + message: str = "suppressed", +) -> Iterator[None]: + """Context manager: suppress listed exceptions and log a WARNING. 
+ + Example:: + + with suppress_and_log(log, FileNotFoundError, PermissionError, + message="failed to unlink temp"): + path.unlink() + + Args: + logger: Destination logger for the WARNING. + *types: Exception classes to suppress. At least one required. + message: Prefix for the log line so callers can identify the site. + + Raises: + ValueError: If no exception types are passed. + """ + if not types: + raise ValueError("suppress_and_log requires at least one exception type") + try: + yield + except types as exc: + logger.warning("%s: %s: %s", message, type(exc).__name__, exc) + + +__all__ = ["narrow_excepts", "suppress_and_log"] diff --git a/tools/_common/paths.py b/tools/_common/paths.py new file mode 100644 index 0000000..e911794 --- /dev/null +++ b/tools/_common/paths.py @@ -0,0 +1,38 @@ +"""Repository-layout path constants. + +Every helper module that needs to know where the repo root, the tools +package, or the tests live imports from here instead of re-deriving the +path from ``Path(__file__)``. Keeping the derivation in one place means +a layout change only has to be reflected once. + +Typical use:: + + from tools._common.paths import TOOLS_DIR, REPO_ROOT + settings_file = REPO_ROOT / "config" / "settings.json" +""" + +from __future__ import annotations + +from pathlib import Path + +# This file lives at ``/tools/_common/paths.py``; climbing three +# parents gets us the repo root. 
+_THIS = Path(__file__).resolve() + +REPO_ROOT: Path = _THIS.parent.parent.parent +TOOLS_DIR: Path = REPO_ROOT / "tools" +COMMON_DIR: Path = TOOLS_DIR / "_common" +TESTS_DIR: Path = REPO_ROOT / "tests" +PLANS_DIR: Path = REPO_ROOT / "plans" +AUDITS_DIR: Path = REPO_ROOT / "audits" +PORTABLE_DIR: Path = REPO_ROOT / "portable" + +__all__ = [ + "AUDITS_DIR", + "COMMON_DIR", + "PLANS_DIR", + "PORTABLE_DIR", + "REPO_ROOT", + "TESTS_DIR", + "TOOLS_DIR", +] diff --git a/tools/_common/subprocess.py b/tools/_common/subprocess.py new file mode 100644 index 0000000..46fd2c1 --- /dev/null +++ b/tools/_common/subprocess.py @@ -0,0 +1,63 @@ +"""Subprocess helpers that suppress the Windows console popup. + +Every tool that shells out currently duplicates the +``creationflags=0x08000000`` incantation (sometimes as ``_CNW``, +sometimes as ``_CREATE_NO_WINDOW``, sometimes via +``subprocess.CREATE_NO_WINDOW``). This module centralises the constant +and offers two thin wrappers that merge the flag with any +caller-provided ``creationflags`` so a custom flag set never silently +drops the no-window bit. + +On non-Windows platforms the flag is ``0`` and the wrappers behave +exactly like their stdlib counterparts — safe to import everywhere. +""" + +from __future__ import annotations + +import subprocess as _stdlib_subprocess +import sys +from typing import Any + +# Absolute value of WinAPI ``CREATE_NO_WINDOW``. On non-Windows it is +# meaningless; zero keeps the OR-merging below a no-op. +CREATE_NO_WINDOW: int = 0x08000000 if sys.platform == "win32" else 0 + + +def run_hidden(cmd: Any, **kwargs: Any) -> _stdlib_subprocess.CompletedProcess: + """Run ``cmd`` via :func:`subprocess.run` without popping a console. + + Any ``creationflags`` the caller passes are OR'd with + :data:`CREATE_NO_WINDOW` so the hidden-window bit is never dropped. + + Args: + cmd: Command sequence (list) or string — passed straight through. 
+ **kwargs: Forwarded to :func:`subprocess.run` unchanged, except + ``creationflags`` which is merged. + + Returns: + The :class:`subprocess.CompletedProcess` from stdlib. + """ + flags = kwargs.pop("creationflags", 0) | CREATE_NO_WINDOW + return _stdlib_subprocess.run(cmd, creationflags=flags, **kwargs) + + +def popen_hidden(cmd: Any, **kwargs: Any) -> _stdlib_subprocess.Popen: + """Spawn ``cmd`` via :class:`subprocess.Popen` without popping a console. + + Same flag-merging contract as :func:`run_hidden`; useful when the + caller needs to interact with the process (read stdout, send signals) + rather than wait for completion. + + Args: + cmd: Command sequence or string. + **kwargs: Forwarded to :class:`subprocess.Popen` unchanged, except + ``creationflags`` which is merged. + + Returns: + The :class:`subprocess.Popen` instance. + """ + flags = kwargs.pop("creationflags", 0) | CREATE_NO_WINDOW + return _stdlib_subprocess.Popen(cmd, creationflags=flags, **kwargs) + + +__all__ = ["CREATE_NO_WINDOW", "popen_hidden", "run_hidden"] diff --git a/tools/_common/ui_theme.py b/tools/_common/ui_theme.py new file mode 100644 index 0000000..7d461df --- /dev/null +++ b/tools/_common/ui_theme.py @@ -0,0 +1,77 @@ +"""Shared ttk styling for the toolbox dark palette. + +Every tool currently sets up its own ``ttk.Style()`` with nearly-identical +Treeview colours (see audits/2026-04-17-debt-review.md §B0). This module +centralises the dark palette so a palette tweak touches one file instead +of ten. + +Typical use inside a tool's ``_build_ui``:: + + from tools._common.ui_theme import apply_dark_treeview_style + apply_dark_treeview_style(self) +""" + +from __future__ import annotations + +from tkinter import ttk +from typing import Optional + +# Colours match the customtkinter "dark blue" preset already used by the +# toolbox. Keep the constants so callers can reference the same palette +# when configuring non-Treeview widgets. 
+TREE_BG: str = "#2b2b2b" +TREE_FG: str = "white" +TREE_HEADING_BG: str = "#565b5e" +TREE_HEADING_FONT: tuple[str, int, str] = ("Arial", 10, "bold") + + +def apply_dark_treeview_style( + master: Optional[object] = None, + base_theme: str = "clam", +) -> ttk.Style: + """Configure a :class:`ttk.Style` with the toolbox dark-Treeview palette. + + Args: + master: Optional widget to scope the style to. Pass ``None`` to + configure the global (root-owned) style. + base_theme: ttk base theme to switch to before applying overrides. + ``"clam"`` exposes the most configuration knobs on Windows; + fall through silently if the theme is unavailable. + + Returns: + The configured :class:`ttk.Style`. Callers that need extra + overrides can chain additional ``configure`` calls. + """ + style = ttk.Style(master) if master is not None else ttk.Style() + + try: + style.theme_use(base_theme) + except Exception: + # Rare on stripped Python builds (e.g. some CI runners) — the + # default theme is still usable, so carry on. + pass + + style.configure( + "Treeview", + background=TREE_BG, + foreground=TREE_FG, + fieldbackground=TREE_BG, + borderwidth=0, + ) + style.configure( + "Treeview.Heading", + background=TREE_HEADING_BG, + foreground=TREE_FG, + relief="flat", + font=TREE_HEADING_FONT, + ) + return style + + +__all__ = [ + "TREE_BG", + "TREE_FG", + "TREE_HEADING_BG", + "TREE_HEADING_FONT", + "apply_dark_treeview_style", +] diff --git a/tools/_runner.py b/tools/_runner.py index 88170e0..edaa271 100644 --- a/tools/_runner.py +++ b/tools/_runner.py @@ -9,9 +9,9 @@ * A dotted module path (e.g. ``tools.decision_dice``) — imported via ``importlib.import_module``. * A filesystem path to a ``.py`` file — loaded via - ``importlib.util.spec_from_file_location``. This path is required - for tool filenames that are not valid Python identifiers (e.g. - ``NETWORK STABILITY MONITOR.py`` contains spaces). + ``importlib.util.spec_from_file_location``. 
This path is useful for + tool filenames that are not valid Python identifiers (e.g. files + containing spaces). Behavior: @@ -45,6 +45,34 @@ from typing import Optional +TOOLS_DIR = Path(__file__).resolve().parent + + +def _validate_under_tools(path: Path) -> Path: + """Resolve *path* and ensure it lives under ``tools/``. + + Collapses symlinks and ``..`` segments via ``resolve(strict=True)`` so that + a symlink inside ``tools/`` pointing outside is refused alongside a plain + external path. + + Args: + path: The candidate path (absolute or relative). + + Returns: + The resolved absolute path, guaranteed to be under ``tools/``. + + Raises: + FileNotFoundError: If *path* does not exist. + PermissionError: If the resolved path is outside ``tools/``. + """ + resolved = path.resolve(strict=True) + if not resolved.is_relative_to(TOOLS_DIR): + raise PermissionError( + f"refusing to load {resolved}: outside {TOOLS_DIR}" + ) + return resolved + + def _load_by_path(path: Path) -> ModuleType: """Load a module from an explicit file path. @@ -69,10 +97,15 @@ def _load_by_path(path: Path) -> ModuleType: def _load(target: str) -> ModuleType: - """Load *target* as either a dotted module name or a file path.""" + """Load *target* as either a dotted module name or a file path. + + File-path targets are gated by :func:`_validate_under_tools` so that the + runner refuses to execute arbitrary ``.py`` files from outside ``tools/``. + """ path_candidate = Path(target) if path_candidate.suffix == ".py" or path_candidate.exists(): - return _load_by_path(path_candidate) + safe_path = _validate_under_tools(path_candidate) + return _load_by_path(safe_path) return importlib.import_module(target) @@ -93,6 +126,37 @@ def _apply_ctk_theme() -> None: pass +def _ensure_hidden_root() -> None: + """Pre-create a withdrawn root so CTkToplevel tools don't spawn a visible blank window. 
+ + When ``CTkToplevel()`` is instantiated and ``tk._default_root`` is ``None``, + tkinter auto-creates a plain ``Tk()`` root that appears as a blank window. + By pre-creating a withdrawn ``CTk`` root here, ``CTkToplevel`` attaches to it + and no blank window appears. + + Side-effect: tools like ``network_stability_monitor`` that only call + ``mainloop()`` when ``tk._default_root is None`` will skip their own + ``mainloop()`` call, letting ``_wait_for_gui()`` manage the event loop. + """ + try: + import tkinter as tk # noqa: WPS433 + if tk._default_root is not None: + return + except Exception: + return + try: + import customtkinter as ctk # noqa: WPS433 + root = ctk.CTk() + root.withdraw() + except Exception: + try: + import tkinter as tk # noqa: WPS433 + root = tk.Tk() + root.withdraw() + except Exception: + pass + + def _wait_for_gui() -> None: """If the tool created Tk windows but didn't enter mainloop, do it here. @@ -127,6 +191,16 @@ def _wait_for_gui() -> None: if not has_children: return # Tool imported tk but never built UI. + # Tools that use CTkToplevel leave an invisible CTk root as a container. + # Withdraw it now so it doesn't appear as a blank extra window. + # (Tools that use CTk() directly as their main window won't have Toplevel + # children yet at this point, so they are unaffected.) 
+ try: + if any(isinstance(w, tk.Toplevel) for w in root.winfo_children()): + root.withdraw() + except tk.TclError: + pass + def _has_active_window() -> bool: try: if not root.winfo_exists(): @@ -197,6 +271,7 @@ def main(argv: Optional[list[str]] = None) -> int: target = args[0] _apply_ctk_theme() + _ensure_hidden_root() try: module = _load(target) diff --git a/tools/account_activity_monitor.py b/tools/account_activity_monitor.py index 2ebc51d..96fb1f2 100644 --- a/tools/account_activity_monitor.py +++ b/tools/account_activity_monitor.py @@ -1268,6 +1268,9 @@ def __init__(self, parent): self._poll_interval = 5000 # ms self._active_categories: Set[str] = set(ALL_CATEGORIES) self._active_severities: Set[str] = {"CRITICAL", "WARNING", "INFO"} + # Guards _active_categories / _active_severities against UI-toggle + # writes racing with worker-thread or after()-callback reads. + self._filter_lock = threading.RLock() self._search_text = "" self._hist_hours = 24 @@ -1370,30 +1373,41 @@ def _build_category_filters(self, parent, on_change) -> Dict[str, ctk.CTkButton] def _toggle_category(self, cat: str, buttons: Dict, on_change): icon = CATEGORY_ICONS.get(cat, "") - if cat in self._active_categories: - self._active_categories.discard(cat) - buttons[cat].configure(fg_color="#2a2a2a", text=f"[OFF] {cat}", - text_color="#555555", border_width=1, - border_color="#444444") - else: - self._active_categories.add(cat) - buttons[cat].configure(fg_color="#1f538d", text=f"{icon} {cat}", - text_color=C_WHITE, border_width=0) + with self._filter_lock: + if cat in self._active_categories: + self._active_categories.discard(cat) + buttons[cat].configure(fg_color="#2a2a2a", text=f"[OFF] {cat}", + text_color="#555555", border_width=1, + border_color="#444444") + else: + self._active_categories.add(cat) + buttons[cat].configure(fg_color="#1f538d", text=f"{icon} {cat}", + text_color=C_WHITE, border_width=0) on_change() def _toggle_severity(self, sev: str, on_change): colors = {"CRITICAL": C_RED, 
"WARNING": C_ORANGE, "INFO": C_GRAY} - if sev in self._active_severities: - self._active_severities.discard(sev) - self._sev_buttons[sev].configure(fg_color="#2a2a2a", text=f"[OFF] {sev}", - text_color="#555555", border_width=1, - border_color="#444444") - else: - self._active_severities.add(sev) - self._sev_buttons[sev].configure(fg_color=colors[sev], text=sev, - text_color=C_WHITE, border_width=0) + with self._filter_lock: + if sev in self._active_severities: + self._active_severities.discard(sev) + self._sev_buttons[sev].configure(fg_color="#2a2a2a", text=f"[OFF] {sev}", + text_color="#555555", border_width=1, + border_color="#444444") + else: + self._active_severities.add(sev) + self._sev_buttons[sev].configure(fg_color=colors[sev], text=sev, + text_color=C_WHITE, border_width=0) on_change() + def _snapshot_filters(self) -> Tuple[frozenset, frozenset]: + """Return an atomic snapshot of the active category/severity filters. + + Callers iterate or membership-test against the returned frozensets so + that concurrent toggle writes cannot mutate the sets mid-read. 
+ """ + with self._filter_lock: + return frozenset(self._active_categories), frozenset(self._active_severities) + # ── Historical Timeline Tab ── # ── User Accounts Tab ── @@ -1791,14 +1805,15 @@ def _set_range(self, hours: int): def _load_historical(self): hours = self._hist_hours if self._hist_hours > 0 else 8760 # "All" = 1 year self._hist_progress.configure(text="Loading...") - self.work_q.put_nowait(("historical", hours, set(self._active_categories))) + cats, _ = self._snapshot_filters() + self.work_q.put_nowait(("historical", hours, cats)) def _filter_historical(self): """Apply category, severity, and text filters to historical events.""" self._search_text = self._hist_search.get().lower() if hasattr(self, '_hist_search') else "" + cats, sevs = self._snapshot_filters() self._populate_tree(self.hist_tree, self._hist_events, - self._active_categories, self._active_severities, - self._search_text) + cats, sevs, self._search_text) def _populate_tree(self, tree: ttk.Treeview, events: List[ParsedEvent], categories: Set[str], severities: Set[str], @@ -1837,9 +1852,10 @@ def _export_historical(self): return try: # Export filtered events + cats, sevs = self._snapshot_filters() filtered = [e for e in self._hist_events - if e.category in self._active_categories - and e.severity in self._active_severities] + if e.category in cats + and e.severity in sevs] self.engine.export_events(filtered, path) messagebox.showinfo("Export", f"Exported {len(filtered)} events to:\n{path}") except Exception as e: @@ -2121,8 +2137,8 @@ def _filter_live(self): search = self._live_search.get().lower() if hasattr(self, '_live_search') else "" # Reverse so _populate_tree shows newest events at the top. 
snap = tuple(reversed(self._live_events.snapshot())) - self._populate_tree(self.live_tree, snap, - self._active_categories, self._active_severities, search) + cats, sevs = self._snapshot_filters() + self._populate_tree(self.live_tree, snap, cats, sevs, search) def _export_live(self): path = filedialog.asksaveasfilename( @@ -2146,7 +2162,8 @@ def _start_live_poll(self): if not self.running: return if not self._live_paused: - self.work_q.put_nowait(("live", set(self._active_categories))) + cats, _ = self._snapshot_filters() + self.work_q.put_nowait(("live", cats)) self.after(self._poll_interval, self._start_live_poll) # ── Settings Tab ── @@ -2270,12 +2287,14 @@ def _process_queue(self): self._live_counter += len(new_events) self._live_count_lbl.configure( text=f"{self._live_counter} events captured") - # Insert at top of tree + # Insert at top of tree — snapshot filters once so a toggle + # firing mid-loop can't mutate the sets during iteration. search = self._live_search.get().lower() if hasattr(self, '_live_search') else "" + cats, sevs = self._snapshot_filters() for e in reversed(new_events): - if e.category not in self._active_categories: + if e.category not in cats: continue - if e.severity not in self._active_severities: + if e.severity not in sevs: continue if search: searchable = f"{e.title} {e.details} {e.user} {e.category}".lower() diff --git a/tools/claude_usage_monitor.py b/tools/claude_usage_monitor.py index d6cbb57..6be3ca7 100644 --- a/tools/claude_usage_monitor.py +++ b/tools/claude_usage_monitor.py @@ -9,11 +9,18 @@ import glob import threading import time -from datetime import datetime, timezone +from datetime import datetime, timedelta, timezone from pathlib import Path import customtkinter as ctk from tkinter import ttk +try: + from winotify import Notification as _WinotifyNotification # type: ignore + _HAS_TOAST = True +except Exception: + _WinotifyNotification = None # type: ignore + _HAS_TOAST = False + TOOL_NAME = "Claude Usage Monitor" 
TOOL_DESC = "Live monitor for Claude Code session costs, tokens & waste" @@ -71,6 +78,12 @@ CLAUDE_DIR = Path.home() / ".claude" PROJECTS_DIR = CLAUDE_DIR / "projects" SESSIONS_DIR = CLAUDE_DIR / "sessions" +ORDER_FILE = CLAUDE_DIR / "claude_usage_monitor_order.json" + +# A session is considered "live" if its last activity is within this window. +# PID-based liveness was too sticky — stale session files kept rows marked LIVE +# long after the CLI exited. +_LIVE_WINDOW_SECONDS = 3600 # 1 hour # Capture the local timezone once at import time so every comparison uses the # same wall-clock reference — avoids DST shift mid-run ambiguity. @@ -304,6 +317,68 @@ def _parse_timestamp(ts_str: str) -> datetime | None: return None +def _is_recently_active(last_timestamp: str | None) -> bool: + """True if the session had activity within the last _LIVE_WINDOW_SECONDS.""" + ts = _parse_timestamp(last_timestamp) + if not ts: + return False + return (datetime.now(timezone.utc) - ts).total_seconds() < _LIVE_WINDOW_SECONDS + + +def _latest_session_id_per_project(sessions: list[dict]) -> set[str]: + """Return the session_id of the most recent session for each project.""" + latest: dict[str, tuple[str, str]] = {} # proj -> (sid, ts) + for s in sessions: + proj = s.get("project") or "" + ts = s.get("last_timestamp") or "" + cur = latest.get(proj) + if cur is None or ts > cur[1]: + latest[proj] = (s["session_id"], ts) + return {sid for sid, _ in latest.values()} + + +def _load_project_state() -> tuple[list[str], list[str], dict]: + """Load persisted state: project order, collapsed projects, notification state. 
+ + Shape (all keys optional for backward compat): + {"order": [...], "collapsed": [...], "notif_state": {sid: level}} + """ + try: + data = json.loads(ORDER_FILE.read_text(encoding="utf-8")) + order = [str(p) for p in data.get("order", []) if isinstance(p, str)] + collapsed = [str(p) for p in data.get("collapsed", []) if isinstance(p, str)] + notif = data.get("notif_state", {}) + if not isinstance(notif, dict): + notif = {} + return order, collapsed, notif + except Exception: + return [], [], {} + + +def _save_project_state(order: list[str], collapsed: list[str], + notif_state: dict) -> None: + """Persist ordered projects, collapsed set, and notification dedup state.""" + try: + ORDER_FILE.write_text( + json.dumps({ + "order": order, + "collapsed": collapsed, + "notif_state": notif_state, + }, indent=2), + encoding="utf-8", + ) + except Exception: + pass # non-fatal + +# Back-compat wrappers so existing call sites continue to work during refactor. +def _load_project_order() -> list[str]: + return _load_project_state()[0] + +def _save_project_order(order: list[str]) -> None: + _, collapsed, notif = _load_project_state() + _save_project_state(order, collapsed, notif) + + def _format_tokens(n: int) -> str: if n >= 1_000_000: return f"{n / 1_000_000:.1f}M" @@ -546,6 +621,85 @@ def _rotate_tag(score: float) -> str | None: return None +# Notification tiers (higher = more urgent). "none" means below amber threshold. +_NOTIF_TIERS = {"none": 0, "amber": 1, "red": 2} + + +def _rotate_level(score: float) -> str: + """Map a rotate score to a notification tier name.""" + if score >= _ROTATE_RED: + return "red" + if score >= _ROTATE_AMBER: + return "amber" + return "none" + + +def _should_notify(state: dict, session_id: str, new_level: str) -> bool: + """Return True if a new_level alert should fire for this session. + + Fires only on upgrades: none→amber, none→red, amber→red. 
Downgrades + (red→amber, amber→none) are silent so the banner doesn't re-trigger + when a session drops below a threshold and climbs back. + """ + if new_level == "none": + return False + last = state.get(session_id, {}).get("level", "none") + return _NOTIF_TIERS.get(new_level, 0) > _NOTIF_TIERS.get(last, 0) + + +def _record_notified(state: dict, session_id: str, level: str) -> None: + """Stamp the dedup state with the level just fired.""" + state[session_id] = { + "level": level, + "fired_at": datetime.now(timezone.utc).isoformat(), + } + + +def _evict_inactive_notifs(state: dict, live_ids: set) -> dict: + """Drop dedup entries for sessions that are no longer LIVE. + + Returns the pruned dict (same object, mutated in place) so callers can + chain or reassign. + """ + for sid in [s for s in state if s not in live_ids]: + state.pop(sid, None) + return state + + +def _cost_composition(turn_costs: list) -> dict: + """Break session cost down by token type by re-applying per-turn model pricing. + + Returns {"input": $, "output": $, "cache_read": $, "cache_write": $, "total": $}. + Walks turn-by-turn so sessions with multi-model turns are costed correctly. + """ + buckets = {"input": 0.0, "output": 0.0, "cache_read": 0.0, "cache_write": 0.0} + for tc in turn_costs: + model = tc[6] if len(tc) > 6 else "" + p = _get_pricing(model) + buckets["input"] += tc[2] * p.get("input", 0.0) / 1_000_000 + buckets["output"] += tc[3] * p.get("output", 0.0) / 1_000_000 + buckets["cache_read"] += tc[4] * p.get("cache_read", 0.0) / 1_000_000 + # Assume 5m cache-writes when breakdown is unknown (matches _calc_turn_cost). 
+ cw_rate = p.get("cache_write_5m", p.get("cache_write", 0.0)) + buckets["cache_write"] += tc[5] * cw_rate / 1_000_000 + buckets["total"] = sum(buckets.values()) + return buckets + + +def _turn_cost_stats(turn_costs: list) -> dict | None: + """Return {first, last, avg, peak, n} turn costs, or None for empty sessions.""" + if not turn_costs: + return None + costs = [tc[1] for tc in turn_costs] + return { + "first": costs[0], + "last": costs[-1], + "avg": sum(costs) / len(costs), + "peak": max(costs), + "n": len(costs), + } + + def _rotate_explanation(sub: dict) -> str: """One-line explanation of the dominant weighted factor.""" weighted = sub["weighted"] @@ -593,20 +747,46 @@ def _friendly_project(dirname: str, cwd: str | None = None) -> str: return "/".join(segments) +# Cache parsed sessions by file path; keyed by mtime so unchanged files +# skip the JSONL re-parse on every 30s refresh. +_SESSION_CACHE: dict[str, tuple[float, dict]] = {} + + +def _parse_session_file_cached(filepath: str) -> dict: + """_parse_session_file with an mtime-based cache.""" + try: + mtime = os.path.getmtime(filepath) + except OSError: + return _parse_session_file(filepath) + cached = _SESSION_CACHE.get(filepath) + if cached is not None and cached[0] == mtime: + return cached[1] + sess = _parse_session_file(filepath) + _SESSION_CACHE[filepath] = (mtime, sess) + return sess + + def load_all_sessions() -> list[dict]: """Scan all projects and return parsed session data, newest first.""" sessions = [] if not PROJECTS_DIR.exists(): return sessions + seen_paths: set[str] = set() for proj_dir in PROJECTS_DIR.iterdir(): if not proj_dir.is_dir(): continue for jsonl_file in proj_dir.glob("*.jsonl"): - sess = _parse_session_file(str(jsonl_file)) + path = str(jsonl_file) + seen_paths.add(path) + sess = _parse_session_file_cached(path) if sess["assistant_turns"] > 0: sessions.append(sess) + # Evict cache entries for files that have disappeared. 
+ for stale in [p for p in _SESSION_CACHE if p not in seen_paths]: + _SESSION_CACHE.pop(stale, None) + # Sort by last timestamp descending sessions.sort( key=lambda s: s.get("last_timestamp") or "", @@ -615,8 +795,22 @@ def load_all_sessions() -> list[dict]: return sessions +def _pid_alive(pid: int) -> bool: + """Return True if the process *pid* is still running.""" + try: + import psutil # available via launcher requirements + return psutil.pid_exists(pid) + except ImportError: + pass + try: + os.kill(pid, 0) + return True + except OSError: + return False + + def _get_active_session_ids() -> set: - """Read ~/.claude/sessions/*.json to find currently active sessions.""" + """Read ~/.claude/sessions/*.json and check pid liveness for each entry.""" active = set() if not SESSIONS_DIR.exists(): return active @@ -624,13 +818,39 @@ def _get_active_session_ids() -> set: try: data = json.loads(f.read_text(encoding="utf-8")) sid = data.get("sessionId") - if sid: - active.add(sid) + pid = data.get("pid") + if not sid: + continue + if pid and not _pid_alive(int(pid)): + continue # process is gone — session is not live + active.add(sid) except Exception: pass return active +# --------------------------------------------------------------------------- +# Sessions-tab column spec (shared header + per-project Treeview) +# --------------------------------------------------------------------------- +# Fields: (col_id, heading_text, width_px, is_numeric) +# The shared heading row and every per-card Treeview iterate this tuple so +# pixel-widths stay aligned. Change here to change both sides at once. 
+_SESSION_COLUMNS = ( + ("status", "", 30, False), + ("session_name", "Session", 200, False), + ("model", "Model", 110, False), + ("turns", "Turns", 55, True), + ("tokens", "Tokens", 80, True), + ("init", "Init", 60, True), + ("cost", "Cost", 70, True), + ("last_turn", "Last Turn", 75, True), + ("waste", "Waste", 55, True), + ("rotate", "Rotate", 70, True), + ("duration", "Duration", 70, True), + ("date", "Date", 110, False), +) + + # --------------------------------------------------------------------------- # GUI # --------------------------------------------------------------------------- @@ -646,9 +866,15 @@ def __init__(self): self._active_ids: set = set() self._loading = False self._auto_refresh = True - self._sort_col = "date" # default sort column + self._sort_col = "date" # default sort column (kept for compat) self._sort_reverse = True # newest first self._plan = "Max 5x ($100/mo)" # default plan + self._window = "All" # time-window filter: Today / Week / Month / All + order, collapsed, notif = _load_project_state() + self._project_order: list[str] = order + self._collapsed_projects: set[str] = set(collapsed) + self._notif_state: dict = notif # {session_id: "amber"|"red"} + self._project_cards: dict = {} # proj_name -> card widgets self._build_ui() self._start_load() @@ -673,6 +899,16 @@ def _build_ui(self): btn_frame = ctk.CTkFrame(top, fg_color="transparent") btn_frame.pack(side="right", padx=12) + # Time-window filter — affects dashboard totals and sessions list + ctk.CTkLabel(btn_frame, text="Window:", font=ctk.CTkFont(size=11)).pack(side="left", padx=(0, 4)) + self._window_var = ctk.StringVar(value=self._window) + window_seg = ctk.CTkSegmentedButton( + btn_frame, values=["Today", "Week", "Month", "All"], + variable=self._window_var, command=self._on_window_change, + font=ctk.CTkFont(size=11), height=28, + ) + window_seg.pack(side="left", padx=(0, 12)) + # Plan selector self._plan_var = ctk.StringVar(value=self._plan) ctk.CTkLabel(btn_frame, 
text="Plan:", font=ctk.CTkFont(size=11)).pack(side="left", padx=(0, 4)) @@ -704,6 +940,33 @@ def _build_ui(self): fg_color="#3a7ebf", hover_color="#2b6194", ).pack(side="left") + # Rotate-alert banner — hidden until _fire_rotate_notification surfaces + # one. Placed between the top bar and the tabs so it can't be scrolled + # off the visible area. + self._banner = ctk.CTkFrame(self, height=36, corner_radius=0, fg_color="#e09a1a") + self._banner_msg = ctk.CTkLabel( + self._banner, text="", font=ctk.CTkFont(size=12, weight="bold"), + text_color="#1e1e1e", anchor="w", + ) + self._banner_msg.pack(side="left", padx=12, pady=6, fill="x", expand=True) + self._banner_btn = ctk.CTkButton( + self._banner, text="Jump to session", width=120, height=24, + fg_color="#1e1e1e", hover_color="#333", text_color="#ffffff", + font=ctk.CTkFont(size=11, weight="bold"), + command=self._banner_jump, + ) + self._banner_btn.pack(side="right", padx=(4, 8), pady=6) + self._banner_dismiss = ctk.CTkButton( + self._banner, text="X", width=28, height=24, + fg_color="transparent", hover_color="#3a3a3a", + text_color="#1e1e1e", + font=ctk.CTkFont(size=11, weight="bold"), + command=self._banner_dismiss_click, + ) + self._banner_dismiss.pack(side="right", padx=(0, 6), pady=6) + self._banner_target_sid: str | None = None # which session Jump goes to + # Not packed yet — _show_banner handles pack(fill="x") when firing. 
+ # Tabs self._tabs = ctk.CTkTabview(self, corner_radius=10) self._tabs.pack(fill="both", expand=True, padx=12, pady=(4, 12)) @@ -823,10 +1086,23 @@ def _build_dashboard(self): # Top projects proj_frame = ctk.CTkFrame(parent, corner_radius=10, fg_color="#2b2b2b") proj_frame.pack(fill="both", expand=True, padx=8, pady=(0, 8)) + + proj_header = ctk.CTkFrame(proj_frame, fg_color="transparent") + proj_header.pack(fill="x", padx=12, pady=(10, 4)) ctk.CTkLabel( - proj_frame, text="Cost by Project", + proj_header, text="Cost by Project", font=ctk.CTkFont(size=13, weight="bold"), - ).pack(anchor="w", padx=12, pady=(10, 4)) + ).pack(side="left") + + # Toggle: Top 10 (default) vs Show all. Gets a dynamic label in _render_dashboard. + self._proj_show_all = ctk.BooleanVar(value=False) + self._proj_toggle_btn = ctk.CTkButton( + proj_header, text="Show all", width=90, height=24, + fg_color="#3a3a3a", hover_color="#4a4a4a", + font=ctk.CTkFont(size=11), + command=self._toggle_proj_show_all, + ) + self._proj_toggle_btn.pack(side="right") self._project_breakdown_frame = ctk.CTkFrame(proj_frame, fg_color="transparent") self._project_breakdown_frame.pack(fill="both", expand=True, padx=12, pady=(0, 10)) @@ -849,19 +1125,19 @@ def _build_sessions_tab(self): self._hide_archived_var = ctk.BooleanVar(value=False) ctk.CTkCheckBox( - filt, text="Active only", + filt, text="Recent only (latest per project + <1h active)", variable=self._hide_archived_var, command=self._render_sessions, font=ctk.CTkFont(size=11), ).pack(side="left", padx=(8, 0)) - # Treeview + # Treeview style shared across per-project cards style = ttk.Style() style.theme_use("clam") style.configure("Dark.Treeview", background="#1e1e1e", foreground="#e0e0e0", fieldbackground="#1e1e1e", borderwidth=0, - font=("Segoe UI", 10)) + font=("Segoe UI", 10), rowheight=22) style.configure("Dark.Treeview.Heading", background="#2b2b2b", foreground="#ffffff", font=("Segoe UI", 10, "bold")) @@ -869,49 +1145,44 @@ def 
_build_sessions_tab(self):
                        background=[("selected", "#3a7ebf")],
                        foreground=[("selected", "#ffffff")])

-        cols = ("status", "project", "session_name", "model", "turns",
-                "tokens", "init", "cost", "waste", "rotate",
-                "duration", "date")
-        self._sess_tree = ttk.Treeview(
-            parent, columns=cols, show="headings",
-            style="Dark.Treeview", height=20,
-        )
-
-        headings = {
-            "status": ("", 30),
-            "project": ("Project", 160),
-            "session_name": ("Session", 220),
-            "model": ("Model", 120),
-            "turns": ("Turns", 55),
-            "tokens": ("Tokens", 80),
-            "init": ("Init Tok", 70),
-            "cost": ("Est. Cost", 75),
-            "waste": ("Waste", 55),
-            "rotate": ("Rotate", 70),
-            "duration": ("Duration", 70),
-            "date": ("Date", 120),
-        }
-        numeric_cols = ("turns", "tokens", "init", "cost", "waste", "rotate")
-        for col, (text, width) in headings.items():
-            self._sess_tree.heading(
-                col, text=text,
-                command=lambda c=col: self._on_heading_click(c),
+        # Shared column header — one row at the top of the sessions tab.
+        # Each per-project card drops its own heading so we save vertical space
+        # and stop repeating column labels for every project.
+        header_bar = ctk.CTkFrame(parent, fg_color="#2b2b2b", height=26)
+        header_bar.pack(fill="x", padx=2, pady=(4, 0))
+        header_bar.pack_propagate(False)
+        x_px = 8  # matches per-card tree's padx
+        for col_id, text, w, numeric in _SESSION_COLUMNS:
+            lbl = ctk.CTkLabel(
+                header_bar, text=text, width=w, height=26,
+                font=ctk.CTkFont(size=10, weight="bold"),
+                text_color="#ffffff",
+                anchor=("e" if numeric else "w"),
             )
-            anchor = "e" if col in numeric_cols else "w"
-            self._sess_tree.column(col, width=width, anchor=anchor, minwidth=40)
+            lbl.place(x=x_px, y=0)
+            x_px += w

-        self._sess_tree.pack(fill="both", expand=True)
-        self._sess_tree.bind("<Double-1>", self._on_session_double_click)
+        # Scrollable container — each project gets its own card inside.
+ self._sess_scroll = ctk.CTkScrollableFrame( + parent, fg_color="#151515", corner_radius=0, + ) + self._sess_scroll.pack(fill="both", expand=True, padx=2, pady=(0, 2)) hint = ctk.CTkLabel( - parent, text="Double-click a session to see per-turn detail", + parent, + text="Double-click a session for detail · ↑/↓ to reorder projects", font=ctk.CTkFont(size=10), text_color="gray", ) hint.pack(pady=(2, 4)) # ------ Detail tab def _build_detail_tab(self): - parent = self._tab_detail + # Wrap the entire detail body in a scrollable frame so cards + chart + + # tools panel + turn table are reachable on small window sizes. + outer = ctk.CTkScrollableFrame(self._tab_detail, fg_color="transparent") + outer.pack(fill="both", expand=True) + parent = outer + self._detail_scroll = outer self._detail_header = ctk.CTkLabel( parent, text="Select a session from the Sessions tab", @@ -952,6 +1223,7 @@ def _build_detail_tab(self): ("d_cache_read", "Cache Read"), ("d_cache_write", "Cache Write"), ("d_cost", "Total Cost"), + ("d_last_turn", "Last Turn Tok"), ("d_waste", "Waste Factor"), ]): card = ctk.CTkFrame(self._detail_cards_frame, corner_radius=8, fg_color="#2b2b2b") @@ -979,6 +1251,76 @@ def _build_detail_tab(self): v.pack(pady=(0, 6), padx=6) self._detail_card_widgets[key] = v + # Cost composition — where the $ actually went (per token-type). + cost_frame = ctk.CTkFrame(parent, corner_radius=10, fg_color="#2b2b2b") + cost_frame.pack(fill="x", padx=8, pady=4) + ctk.CTkLabel( + cost_frame, text="Cost Composition", + font=ctk.CTkFont(size=12, weight="bold"), + ).pack(anchor="w", padx=12, pady=(8, 2)) + + self._cost_rows: dict[str, dict] = {} # token-type -> {bar, label} + cost_body = ctk.CTkFrame(cost_frame, fg_color="transparent") + cost_body.pack(fill="x", padx=12, pady=(0, 6)) + # Visually distinct swatch per token-type so the reader scans quickly. 
+ palette = { + "input": "#3a7ebf", + "output": "#bf6a3a", + "cache_read": "#2a8a2a", + "cache_write": "#9e6a3a", + } + for key, label in ( + ("input", "Input"), + ("output", "Output"), + ("cache_read", "Cache Read"), + ("cache_write", "Cache Write"), + ): + row = ctk.CTkFrame(cost_body, fg_color="transparent") + row.pack(fill="x", pady=2) + ctk.CTkLabel( + row, text=label, width=95, anchor="w", + font=ctk.CTkFont(size=11), + ).pack(side="left") + bar = ctk.CTkProgressBar( + row, height=12, progress_color=palette[key], + fg_color="#1e1e1e", + ) + bar.set(0.0) + bar.pack(side="left", fill="x", expand=True, padx=(0, 8)) + val = ctk.CTkLabel( + row, text="$0.00 (0%)", width=110, anchor="e", + font=ctk.CTkFont(size=11), + ) + val.pack(side="right") + self._cost_rows[key] = {"bar": bar, "val": val} + + # Turn-cost stats (first / last / avg / peak) — spots session inflation. + stats_frame = ctk.CTkFrame(parent, corner_radius=10, fg_color="#2b2b2b") + stats_frame.pack(fill="x", padx=8, pady=4) + ctk.CTkLabel( + stats_frame, text="Turn Cost Stats", + font=ctk.CTkFont(size=12, weight="bold"), + ).pack(anchor="w", padx=12, pady=(8, 2)) + + turn_stats_body = ctk.CTkFrame(stats_frame, fg_color="transparent") + turn_stats_body.pack(fill="x", padx=12, pady=(0, 10)) + self._turn_stat_widgets: dict[str, ctk.CTkLabel] = {} + for i, (key, label) in enumerate([ + ("first", "First Turn"), + ("last", "Last Turn"), + ("avg", "Avg Turn"), + ("peak", "Peak Turn"), + ]): + cell = ctk.CTkFrame(turn_stats_body, corner_radius=6, fg_color="#1e1e1e") + cell.grid(row=0, column=i, padx=4, pady=2, sticky="nsew") + turn_stats_body.grid_columnconfigure(i, weight=1) + ctk.CTkLabel( + cell, text=label, font=ctk.CTkFont(size=10), text_color="gray", + ).pack(pady=(6, 1), padx=6) + v = ctk.CTkLabel(cell, text="—", font=ctk.CTkFont(size=14, weight="bold")) + v.pack(pady=(0, 6), padx=6) + self._turn_stat_widgets[key] = v + # Cost growth chart (text-based sparkline) chart_frame = ctk.CTkFrame(parent, 
corner_radius=10, fg_color="#2b2b2b") chart_frame.pack(fill="x", padx=8, pady=4) @@ -1023,7 +1365,9 @@ def _build_detail_tab(self): self._turn_tree.column(col, width=w, anchor=anchor) self._turn_tree.tag_configure("cold", foreground="#ff6666") self._turn_tree.tag_configure("warm", foreground="#888888") - self._turn_tree.pack(fill="both", expand=True, padx=8, pady=(0, 8)) + # fill="x" (not "both" + expand) because we now live inside a + # CTkScrollableFrame — the outer scroll handles vertical overflow. + self._turn_tree.pack(fill="x", padx=8, pady=(0, 8)) # ------------------------------------------------------------------ Data loading def _start_load(self): @@ -1035,21 +1379,32 @@ def _start_load(self): def _bg_load(self): sessions = load_all_sessions() - active = _get_active_session_ids() - self.after(0, lambda: self._on_loaded(sessions, active)) + # Time-based liveness: PID-based detection was too sticky, leaving + # LIVE badges on rows long after the CLI actually exited. + self.after(0, lambda: self._on_loaded(sessions)) - def _on_loaded(self, sessions, active): + def _on_loaded(self, sessions, _legacy=None): self._sessions = sessions - self._active_ids = active + self._active_ids = { + s["session_id"] for s in sessions + if _is_recently_active(s.get("last_timestamp")) + } self._loading = False total = len(sessions) + in_window = sum(1 for s in sessions if self._session_in_window(s)) + window_str = self._window + if window_str == "All": + status = f"{total} sessions" + else: + status = f"{in_window} in {window_str} (of {total})" self._status_label.configure( - text=f"{total} sessions loaded | Last refresh: {datetime.now().strftime('%H:%M:%S')}" + text=f"{status} | Last refresh: {datetime.now().strftime('%H:%M:%S')}" ) self._render_dashboard() self._render_sessions() + self._scan_rotate_notifications() # Schedule next auto-refresh if self._auto_refresh: @@ -1073,6 +1428,49 @@ def _on_plan_change(self, _val=None): self._plan = self._plan_var.get() 
self._render_dashboard() + def _toggle_proj_show_all(self): + self._proj_show_all.set(not self._proj_show_all.get()) + self._render_dashboard() + + def _on_window_change(self, _val=None): + self._window = self._window_var.get() + self._render_dashboard() + self._render_sessions() + + @staticmethod + def _window_cutoff(window: str) -> datetime | None: + """UTC cutoff timestamp for a time-window label. None = no filter.""" + if window == "All": + return None + now = datetime.now(timezone.utc) + if window == "Today": + return now.replace(hour=0, minute=0, second=0, microsecond=0) + if window == "Week": + return now - timedelta(days=7) + if window == "Month": + return now - timedelta(days=30) + return None + + def _session_in_window(self, s: dict) -> bool: + """True if the session's last activity falls within the active window.""" + cutoff = self._window_cutoff(self._window) + if cutoff is None: + return True + ts = _parse_timestamp(s.get("last_timestamp")) + if ts is None: + return False + return ts >= cutoff + + def _window_months(self) -> float: + """Fraction of a month covered by the active window (for plan-cost scaling).""" + if self._window == "Today": + return 1.0 / 30.0 + if self._window == "Week": + return 7.0 / 30.0 + if self._window == "Month": + return 1.0 + return self._months_spanned() + @staticmethod def _plan_monthly_cost(plan: str) -> float | None: """Return monthly $ cost for a plan, or None for API.""" @@ -1103,16 +1501,19 @@ def _months_spanned(self) -> float: # ------------------------------------------------------------------ Renderers def _render_dashboard(self): - api_value = sum(s["total_cost"] for s in self._sessions) - total_turns = sum(s["assistant_turns"] for s in self._sessions) + # Apply the time-window filter to the dashboard view. 
+ windowed = [s for s in self._sessions if self._session_in_window(s)] + + api_value = sum(s["total_cost"] for s in windowed) + total_turns = sum(s["assistant_turns"] for s in windowed) total_tokens = sum( s["total_input"] + s["total_output"] + s["total_cache_read"] + s["total_cache_write"] - for s in self._sessions + for s in windowed ) - # Plan calculations + # Plan calculations — scale by the active window (Today / Week / Month / All). monthly = self._plan_monthly_cost(self._plan) - months = self._months_spanned() + months = self._window_months() if monthly is not None: plan_total = monthly * months @@ -1138,19 +1539,19 @@ def _render_dashboard(self): self._card_widgets["savings"].configure(text="N/A", text_color="gray") self._card_widgets["value_ratio"].configure(text="N/A", text_color="gray") - self._card_widgets["total_sessions"].configure(text=str(len(self._sessions))) + self._card_widgets["total_sessions"].configure(text=str(len(windowed))) self._card_widgets["total_turns"].configure(text=f"{total_turns:,}") self._card_widgets["total_tokens"].configure(text=_format_tokens(total_tokens)) # Peak hours - self._render_peak_hours() + self._render_peak_hours(windowed) # Model breakdown for w in self._model_breakdown_frame.winfo_children(): w.destroy() model_costs: dict[str, float] = {} - for s in self._sessions: + for s in windowed: for m in s["models_used"]: model_costs[m] = model_costs.get(m, 0) + s["total_cost"] if not model_costs: @@ -1175,7 +1576,7 @@ def _render_dashboard(self): tool_costs: dict[str, float] = {} tool_calls: dict[str, int] = {} - for s in self._sessions: + for s in windowed: for name, st in (s.get("tool_stats") or {}).items(): tool_costs[name] = tool_costs.get(name, 0.0) + st["est_cost"] tool_calls[name] = tool_calls.get(name, 0) + st["calls"] @@ -1202,41 +1603,66 @@ def _render_dashboard(self): info = f"~{_format_cost(cost)} ({tool_calls[name]} calls)" ctk.CTkLabel(row, text=info, font=ctk.CTkFont(size=11)).pack(side="left", padx=4) - # 
Project breakdown + # Project breakdown (2-col grid; Top 10 by default, Show all toggle) for w in self._project_breakdown_frame.winfo_children(): w.destroy() proj_costs: dict[str, float] = {} proj_sessions: dict[str, int] = {} proj_cwd: dict[str, str | None] = {} - for s in self._sessions: + for s in windowed: p = s["project"] proj_costs[p] = proj_costs.get(p, 0) + s["total_cost"] proj_sessions[p] = proj_sessions.get(p, 0) + 1 if p not in proj_cwd: proj_cwd[p] = s.get("cwd") - max_pc = max(proj_costs.values()) if proj_costs else 1 - for proj, cost in sorted(proj_costs.items(), key=lambda x: -x[1]): + ranked = sorted(proj_costs.items(), key=lambda x: -x[1]) + total_projects = len(ranked) + show_all = self._proj_show_all.get() + visible = ranked if show_all else ranked[:10] + + # Update toggle label with accurate count + if total_projects > 10: + self._proj_toggle_btn.configure( + text=f"Top 10" if show_all else f"Show all ({total_projects})" + ) + self._proj_toggle_btn.pack(side="right") + else: + # Nothing to toggle — hide the button. 
+ self._proj_toggle_btn.pack_forget() + + max_pc = max((c for _, c in visible), default=1) + self._project_breakdown_frame.grid_columnconfigure(0, weight=1, uniform="pcol") + self._project_breakdown_frame.grid_columnconfigure(1, weight=1, uniform="pcol") + for idx, (proj, cost) in enumerate(visible): row = ctk.CTkFrame(self._project_breakdown_frame, fg_color="transparent") - row.pack(fill="x", pady=2) + row.grid(row=idx // 2, column=idx % 2, sticky="ew", padx=4, pady=2) friendly = _friendly_project(proj, proj_cwd.get(proj)) - ctk.CTkLabel(row, text=friendly, font=ctk.CTkFont(size=11), width=220, anchor="w").pack(side="left") + ctk.CTkLabel( + row, text=friendly, font=ctk.CTkFont(size=11), + width=160, anchor="w", + ).pack(side="left") - bar_width = max(4, int(250 * (cost / max_pc))) if max_pc > 0 else 4 - bar = ctk.CTkFrame(row, width=bar_width, height=16, corner_radius=4, fg_color="#bf6a3a") - bar.pack(side="left", padx=(8, 4)) + bar_width = max(4, int(140 * (cost / max_pc))) if max_pc > 0 else 4 + bar = ctk.CTkFrame(row, width=bar_width, height=14, corner_radius=4, fg_color="#bf6a3a") + bar.pack(side="left", padx=(6, 4)) bar.pack_propagate(False) - info = f"{_format_cost(cost)} ({proj_sessions[proj]} sessions)" - ctk.CTkLabel(row, text=info, font=ctk.CTkFont(size=11)).pack(side="left", padx=4) + info = f"{_format_cost(cost)} ({proj_sessions[proj]})" + ctk.CTkLabel(row, text=info, font=ctk.CTkFont(size=11)).pack(side="left", padx=2) + + def _render_peak_hours(self, sessions: list[dict] | None = None): + """Draw 24-bar chart of total cost per hour-of-day and update peak pill. - def _render_peak_hours(self): - """Draw 24-bar chart of total cost per hour-of-day and update peak pill.""" + If `sessions` is omitted, aggregate across everything; the dashboard + passes a window-filtered subset so the peak view honors Today/Week/Month. 
+ """ + sessions = sessions if sessions is not None else self._sessions # Aggregate cost per local hour across every turn hour_costs = [0.0] * 24 - for s in self._sessions: + for s in sessions: for tc in s["turn_costs"]: ts_str, cost = tc[0], tc[1] ts = _parse_timestamp(ts_str) @@ -1312,18 +1738,33 @@ def _render_peak_hours(self): ) def _render_sessions(self): - for item in self._sess_tree.get_children(): - self._sess_tree.delete(item) + """Reconcile-by-key: update existing cards in place, only create/destroy deltas. + + Destroy-rebuild reset the outer scroll position every 30s whenever a + new project appeared. Now we diff self._project_cards against the + freshly-computed order, so scroll position is preserved. + """ + # Belt-and-braces: save and restore outer scroll position (T7). + try: + saved_yview = self._sess_scroll._parent_canvas.yview() + except Exception: + saved_yview = None query = self._sess_search_var.get().strip().lower() - hide_archived = self._hide_archived_var.get() + recent_only = self._hide_archived_var.get() - # Build rows with both display values and raw sortable values - rows = [] + # "Recent only" = latest session per project OR <1h since last activity. + latest_ids = _latest_session_id_per_project(self._sessions) + + # Filter + enrich (with time-window filter from T9). 
+ enriched = [] for s in self._sessions: - is_active = s["session_id"] in self._active_ids + if not self._session_in_window(s): + continue + is_live = s["session_id"] in self._active_ids # time-based (<1h) + is_latest = s["session_id"] in latest_ids - if hide_archived and not is_active: + if recent_only and not (is_live or is_latest): continue proj = _friendly_project(s["project"], s.get("cwd")) @@ -1332,121 +1773,440 @@ def _render_sessions(self): if query and query not in proj.lower() and query not in model.lower() and query not in sess_name.lower(): continue - status = "LIVE" if is_active else "" - tok_in = s["total_input"] - tok_out = s["total_output"] - tok_cr = s["total_cache_read"] - tok_cw = s["total_cache_write"] + + enriched.append({ + "s": s, "proj": proj, "model": model, + "sess_name": sess_name, "is_live": is_live, + }) + + # Bucket by project + buckets: dict[str, list[dict]] = {} + for item in enriched: + buckets.setdefault(item["proj"], []).append(item) + + # Resolve display order: user-saved first, then remaining by total cost desc. + saved = [p for p in self._project_order if p in buckets] + remaining = sorted( + [p for p in buckets if p not in saved], + key=lambda p: sum(i["s"]["total_cost"] for i in buckets[p]), + reverse=True, + ) + ordered = saved + remaining + + # Persist (merges newly-seen projects into the saved order). + if ordered != self._project_order: + self._project_order = ordered + _save_project_state( + ordered, sorted(self._collapsed_projects), self._notif_state, + ) + + # --- Reconcile --- + # Destroy cards for projects that no longer appear. + for proj in list(self._project_cards.keys()): + if proj not in buckets: + self._project_cards[proj]["card"].destroy() + self._project_cards.pop(proj, None) + + # Create or update cards, in order. 
+ total = len(ordered) + for index, proj in enumerate(ordered): + items = buckets[proj] + if proj in self._project_cards: + self._update_project_card(proj, items, index, total) + else: + self._build_project_card(proj, items, index, total) + + # Re-pack in desired order. pack_forget + pack preserves widget + # identity (scroll, selection, etc.) and only reshuffles layout. + for proj in ordered: + card = self._project_cards[proj]["card"] + card.pack_forget() + card.pack(fill="x", pady=6, padx=4) + + # Restore scroll position after layout settles. + if saved_yview is not None: + def _restore(): + try: + self._sess_scroll._parent_canvas.yview_moveto(saved_yview[0]) + except Exception: + pass + self.after_idle(_restore) + + def _build_project_card(self, proj: str, items: list[dict], + index: int, total: int) -> None: + """Build one project card (header + embedded Treeview). Stores widget + handles in self._project_cards[proj] so _update_project_card can refresh + in place without destroy+rebuild. + """ + card = ctk.CTkFrame( + self._sess_scroll, fg_color="#1e1e1e", + corner_radius=10, border_width=1, border_color="#333", + ) + card.pack(fill="x", pady=6, padx=4) + + # ---- Header + header = ctk.CTkFrame(card, fg_color="transparent") + header.pack(fill="x", padx=10, pady=(8, 4)) + + # Collapse toggle (T8). Keeps tree hidden when user prefers. 
+ collapse_btn = ctk.CTkButton( + header, text="▼", width=24, height=24, + fg_color="transparent", hover_color="#3a3a3a", + font=ctk.CTkFont(size=11), + command=lambda p=proj: self._toggle_project_collapsed(p), + ) + collapse_btn.pack(side="left", padx=(0, 4)) + + title_lbl = ctk.CTkLabel( + header, text=proj, + font=ctk.CTkFont(size=14, weight="bold"), + text_color="#ffd479", + ) + title_lbl.pack(side="left") + + meta_lbl = ctk.CTkLabel( + header, text="", + font=ctk.CTkFont(size=11), text_color="gray", + ) + meta_lbl.pack(side="left") + + live_lbl = ctk.CTkLabel( + header, text=" LIVE", + font=ctk.CTkFont(size=11, weight="bold"), + text_color="#44ee44", + ) + # Packed/unpacked in _update_project_card based on any_live. + + # Reorder arrows (rightmost) + down_btn = ctk.CTkButton( + header, text="↓", width=26, height=26, + fg_color="#2b2b2b", hover_color="#3a3a3a", + command=lambda p=proj: self._move_project(p, +1), + ) + down_btn.pack(side="right", padx=2) + up_btn = ctk.CTkButton( + header, text="↑", width=26, height=26, + fg_color="#2b2b2b", hover_color="#3a3a3a", + command=lambda p=proj: self._move_project(p, -1), + ) + up_btn.pack(side="right", padx=2) + + # ---- Body: Treeview (columns come from shared _SESSION_COLUMNS so the + # hoisted header bar stays pixel-aligned). show="" hides the per-card + # heading row — we rely on the shared bar above _sess_scroll instead. 
+        cols = tuple(c[0] for c in _SESSION_COLUMNS)
+        tree = ttk.Treeview(
+            card, columns=cols, show="",
+            style="Dark.Treeview", height=1,
+        )
+        for col_id, _text, w, numeric in _SESSION_COLUMNS:
+            tree.column(
+                col_id, width=w, minwidth=40,
+                anchor=("e" if numeric else "w"),
+                stretch=False,
+            )
+
+        tree.pack(fill="x", padx=8, pady=(0, 8))
+        tree.bind("<Double-1>", self._on_session_double_click)
+        tree.tag_configure("active", foreground="#44ee44")
+        tree.tag_configure("rot_red", background="#4a1a1a")
+        tree.tag_configure("rot_amber", background="#4a3a1a")
+        tree.tag_configure("group_model", background="#222222", foreground="#89c2ff")
+
+        self._project_cards[proj] = {
+            "card": card,
+            "header": header,
+            "title": title_lbl,
+            "meta": meta_lbl,
+            "live": live_lbl,
+            "up": up_btn,
+            "down": down_btn,
+            "collapse": collapse_btn,
+            "tree": tree,
+            "tree_multi_model": False,  # show style may need swap on update
+        }
+        self._update_project_card(proj, items, index, total)
+
+    def _update_project_card(self, proj: str, items: list[dict],
+                             index: int, total: int) -> None:
+        """Refresh an existing project card in place without destroying widgets.
+
+        Preserves outer scroll position: we only mutate text, state, and
+        Treeview rows — never destroy the card frame.
+ """ + handles = self._project_cards[proj] + card = handles["card"] + meta_lbl = handles["meta"] + live_lbl = handles["live"] + up_btn = handles["up"] + down_btn = handles["down"] + tree = handles["tree"] + + n = len(items) + turns = sum(i["s"]["assistant_turns"] for i in items) + tokens = sum( + i["s"]["total_input"] + i["s"]["total_output"] + + i["s"]["total_cache_read"] + i["s"]["total_cache_write"] + for i in items + ) + cost = sum(i["s"]["total_cost"] for i in items) + any_live = any(i["is_live"] for i in items) + + meta = ( + f" · {n} session{'s' if n != 1 else ''}" + f" · {turns} turns" + f" · {_format_tokens(tokens)} tok" + f" · {_format_cost(cost)}" + ) + meta_lbl.configure(text=meta) + + if any_live: + # Pack after the meta label (which is `side="left"`), before the ↑↓ buttons. + if not live_lbl.winfo_ismapped(): + live_lbl.pack(side="left") + else: + if live_lbl.winfo_ismapped(): + live_lbl.pack_forget() + + up_btn.configure(state="disabled" if index == 0 else "normal") + down_btn.configure(state="disabled" if index == total - 1 else "normal") + + # Border tint by max live rotate score (T13). + card.configure(border_color=self._project_border_color(items)) + + # Re-populate the tree contents. We keep the Treeview widget alive + # so its own state (scroll/selection) is intact within the card. + self._populate_project_tree(tree, proj, items) + + # Collapse state (T8) — honor user preference. + if proj in self._collapsed_projects: + if tree.winfo_ismapped(): + tree.pack_forget() + handles["collapse"].configure(text="▶") + else: + if not tree.winfo_ismapped(): + tree.pack(fill="x", padx=8, pady=(0, 8)) + handles["collapse"].configure(text="▼") + + def _populate_project_tree(self, tree: "ttk.Treeview", proj: str, + items: list[dict]) -> None: + """Clear and re-fill the per-project Treeview with current items. + + Flat list (no model-grouping) — the model is shown in its own column, + which keeps every row aligned with the hoisted header bar. 
+ """ + # Preserve selection inside this card if it survives the refresh. + prior_sel = tree.selection() + prior_focus = tree.focus() + + # show="" is enforced at build time; height follows row count. + tree.configure(height=max(1, len(items))) + + # Clear old rows (flat — no nested children after the switch). + for iid in tree.get_children(): + tree.delete(iid) + + # Newest first inside each project card. + items_sorted = sorted( + items, key=lambda i: i["s"]["last_timestamp"] or "", reverse=True, + ) + + for i in items_sorted: + s = i["s"] + is_live = i["is_live"] + tok_in = s["total_input"]; tok_out = s["total_output"] + tok_cr = s["total_cache_read"]; tok_cw = s["total_cache_write"] + total_tokens = tok_in + tok_cr + tok_cw + tok_out + wf = _waste_factor(s["turn_costs"]) waste_str = f"{wf:.1f}x" if wf is not None else "—" init_tokens = _turn_total_tokens(s["turn_costs"][0]) if s["turn_costs"] else 0 + last_turn_tokens = _turn_total_tokens(s["turn_costs"][-1]) if s["turn_costs"] else 0 duration = _duration_str(s["first_timestamp"], s["last_timestamp"]) - total_tokens = tok_in + tok_cr + tok_cw + tok_out - - # Rotation signal: bar + % per row; colored tag for amber/red rot_sub = _rotate_subscores(s) if rot_sub: - rot_score = rot_sub["total"] - rot_display = _rotate_bar(rot_score) - rot_tag_name = _rotate_tag(rot_score) + rot_display = _rotate_bar(rot_sub["total"]) + rot_tag_name = _rotate_tag(rot_sub["total"]) else: - rot_score = -1 rot_display = "" rot_tag_name = None date_str = "" ts = _parse_timestamp(s["last_timestamp"]) if ts: - local = ts.astimezone(LOCAL_TZ) - date_str = local.strftime("%Y-%m-%d %H:%M") + date_str = ts.astimezone(LOCAL_TZ).strftime("%Y-%m-%d %H:%M") - row_tags = [] - if is_active: - row_tags.append("active") + tags = [] + if is_live: + tags.append("active") if rot_tag_name: - row_tags.append(rot_tag_name) - tags = tuple(row_tags) - - display = (status, proj, sess_name, model, s["assistant_turns"], - _format_tokens(total_tokens), - 
_format_tokens(init_tokens), - _format_cost(s["total_cost"]), - waste_str, rot_display, duration, date_str) - - # Raw values for sorting (numeric where applicable) - sort_vals = { - "status": (0 if is_active else 1), - "project": proj.lower(), - "session_name": sess_name.lower(), - "model": model.lower(), - "turns": s["assistant_turns"], - "tokens": total_tokens, - "init": init_tokens, - "cost": s["total_cost"], - "waste": wf if wf is not None else 0, - "rotate": rot_score, - "duration": (ts.timestamp() if ts else 0) - (_parse_timestamp(s["first_timestamp"]).timestamp() if _parse_timestamp(s["first_timestamp"]) else 0), - "date": s["last_timestamp"] or "", - } - - rows.append((s["session_id"], display, tags, sort_vals)) - - # Apply current sort - col, reverse = self._sort_col, self._sort_reverse - if col: - rows.sort(key=lambda r: r[3].get(col, ""), reverse=reverse) - - for sid, display, tags, _ in rows: - self._sess_tree.insert("", "end", iid=sid, values=display, tags=tags) - - self._sess_tree.tag_configure("active", foreground="#44ee44") - # Rotation status — background tint, compatible with "active" foreground - self._sess_tree.tag_configure("rot_red", background="#4a1a1a") - self._sess_tree.tag_configure("rot_amber", background="#4a3a1a") - - def _on_heading_click(self, col): - """Sort sessions table by clicked column header.""" - if self._sort_col == col: - self._sort_reverse = not self._sort_reverse + tags.append(rot_tag_name) + + display = ( + "LIVE" if is_live else "", + i["sess_name"], i["model"], s["assistant_turns"], + _format_tokens(total_tokens), + _format_tokens(init_tokens), + _format_cost(s["total_cost"]), + _format_tokens(last_turn_tokens), + waste_str, rot_display, duration, date_str, + ) + tree.insert( + "", "end", iid=s["session_id"], + text="", values=display, tags=tuple(tags), + ) + + # Restore selection if the iid still exists. 
+ try: + survivors = [iid for iid in prior_sel if tree.exists(iid)] + if survivors: + tree.selection_set(survivors) + if prior_focus and tree.exists(prior_focus): + tree.focus(prior_focus) + except Exception: + pass + + def _project_border_color(self, items: list[dict]) -> str: + """Ambient rotation border: reflect worst LIVE rotate score for this project.""" + worst = 0.0 + for i in items: + if not i["is_live"]: + continue + sub = _rotate_subscores(i["s"]) + if sub is None: + continue + if sub["total"] > worst: + worst = sub["total"] + if worst >= _ROTATE_RED: + return "#cc3333" + if worst >= _ROTATE_AMBER: + return "#e09a1a" + return "#333" + + def _toggle_project_collapsed(self, proj: str) -> None: + if proj in self._collapsed_projects: + self._collapsed_projects.discard(proj) else: - self._sort_col = col - self._sort_reverse = False - - # Update heading arrows - cols = ("status", "project", "session_name", "model", "turns", - "tokens", "init", "cost", "waste", "rotate", - "duration", "date") - base_headings = { - "status": "", "project": "Project", "session_name": "Session", - "model": "Model", "turns": "Turns", - "tokens": "Tokens", - "init": "Init Tok", - "cost": "Est. 
Cost", "waste": "Waste", - "rotate": "Rotate", - "duration": "Duration", "date": "Date", - } - for c in cols: - arrow = "" - if c == self._sort_col: - arrow = " v" if self._sort_reverse else " ^" - self._sess_tree.heading(c, text=base_headings[c] + arrow) + self._collapsed_projects.add(proj) + _save_project_state( + self._project_order, + sorted(self._collapsed_projects), + self._notif_state, + ) + self._render_sessions() + def _move_project(self, proj: str, delta: int) -> None: + """Swap project with its neighbour in the saved order, then re-render.""" + order = list(self._project_order) + if proj not in order: + return + idx = order.index(proj) + new_idx = idx + delta + if new_idx < 0 or new_idx >= len(order): + return + order[idx], order[new_idx] = order[new_idx], order[idx] + self._project_order = order + _save_project_state(order, sorted(self._collapsed_projects), self._notif_state) self._render_sessions() + # ------------------------------------------------------------------ Rotate banner + toast + def _show_banner(self, level: str, proj: str, session_id: str) -> None: + """Display the top-of-window rotate banner targeting a specific session.""" + color = "#cc3333" if level == "red" else "#e09a1a" + label = "ROTATE NOW" if level == "red" else "CONSIDER ROTATING" + self._banner.configure(fg_color=color) + self._banner_msg.configure(text=f"{label} — {proj}") + self._banner_target_sid = session_id + try: + self._banner.pack(fill="x", before=self._tabs) + except Exception: + # Fallback if _tabs isn't packed yet; banner simply won't show. 
+ pass + + def _banner_jump(self) -> None: + sid = self._banner_target_sid + if not sid: + return + session = next((s for s in self._sessions if s["session_id"] == sid), None) + if session is None: + self._banner_dismiss_click() + return + self._show_session_detail(session) + self._tabs.set("Session Detail") + self._banner_dismiss_click() + + def _banner_dismiss_click(self) -> None: + self._banner_target_sid = None + try: + self._banner.pack_forget() + except Exception: + pass + + def _fire_toast(self, level: str, proj: str, explanation: str) -> None: + """OS-level toast via winotify. No-op if the dep is missing.""" + if not _HAS_TOAST: + return + try: + label = "ROTATE NOW" if level == "red" else "Consider rotating" + toast = _WinotifyNotification( + app_id="Claude Usage Monitor", + title=f"{label} — {proj}", + msg=explanation, + ) + toast.show() + except Exception: + # Never let a flaky toast crash the refresh loop. + pass + + def _scan_rotate_notifications(self) -> None: + """Walk LIVE sessions, fire dedup'd toasts + banner on threshold upgrades.""" + live_sessions = [s for s in self._sessions if s["session_id"] in self._active_ids] + live_ids = {s["session_id"] for s in live_sessions} + _evict_inactive_notifs(self._notif_state, live_ids) + + # Find the single highest-tier unfired upgrade to surface in the banner. + # Multiple hot sessions are still each toasted; banner shows the worst. 
+ banner_candidate: tuple[int, dict, str] | None = None # (tier, session, level) + fired = False + + for s in live_sessions: + sub = _rotate_subscores(s) + if sub is None: + continue + level = _rotate_level(sub["total"]) + sid = s["session_id"] + if _should_notify(self._notif_state, sid, level): + proj = _friendly_project(s["project"], s.get("cwd")) + self._fire_toast(level, proj, _rotate_explanation(sub)) + _record_notified(self._notif_state, sid, level) + fired = True + tier = _NOTIF_TIERS[level] + if banner_candidate is None or tier > banner_candidate[0]: + banner_candidate = (tier, s, level) + + if banner_candidate is not None: + _, s, level = banner_candidate + proj = _friendly_project(s["project"], s.get("cwd")) + self._show_banner(level, proj, s["session_id"]) + + if fired: + _save_project_state( + self._project_order, + sorted(self._collapsed_projects), + self._notif_state, + ) + def _on_session_double_click(self, event): - sel = self._sess_tree.selection() + # Each project card owns its own Treeview — find the one that fired. 
+ tree = event.widget + sel = tree.selection() if not sel: return sid = sel[0] - session = None - for s in self._sessions: - if s["session_id"] == sid: - session = s - break + session = next((s for s in self._sessions if s["session_id"] == sid), None) if not session: return - self._show_session_detail(session) self._tabs.set("Session Detail") @@ -1493,6 +2253,29 @@ def _show_session_detail(self, s: dict): self._detail_card_widgets["d_cache_write"].configure(text=_format_tokens(s["total_cache_write"])) self._detail_card_widgets["d_cost"].configure(text=_format_cost(s["total_cost"])) + last_turn_tokens = _turn_total_tokens(s["turn_costs"][-1]) if s["turn_costs"] else 0 + self._detail_card_widgets["d_last_turn"].configure(text=_format_tokens(last_turn_tokens)) + + # Cost composition + turn stats + comp = _cost_composition(s["turn_costs"]) + comp_total = comp["total"] or 1e-9 # avoid div-by-zero when session is empty + for key in ("input", "output", "cache_read", "cache_write"): + amt = comp[key] + pct = amt / comp_total + row = self._cost_rows[key] + row["bar"].set(pct) + row["val"].configure(text=f"{_format_cost(amt)} ({pct * 100:.0f}%)") + + stats = _turn_cost_stats(s["turn_costs"]) + if stats is None: + for w in self._turn_stat_widgets.values(): + w.configure(text="—") + else: + self._turn_stat_widgets["first"].configure(text=_format_cost(stats["first"])) + self._turn_stat_widgets["last"].configure(text=_format_cost(stats["last"])) + self._turn_stat_widgets["avg"].configure(text=_format_cost(stats["avg"])) + self._turn_stat_widgets["peak"].configure(text=_format_cost(stats["peak"])) + wf = _waste_factor(s["turn_costs"]) if wf is not None: waste_text = f"{wf:.1f}x" diff --git a/tools/decision_dice.py b/tools/decision_dice.py index d34f757..b5b7cf1 100644 --- a/tools/decision_dice.py +++ b/tools/decision_dice.py @@ -23,8 +23,12 @@ import customtkinter as ctk from datetime import datetime +from tools._common.exceptions import narrow_excepts +from tools._common.logging 
import get_logger + TOOL_NAME = "Decision Dice" TOOL_DESCRIPTION = "Premium weighted RNG dice — custom profiles, 3D animation, decision journal" +log = get_logger(__name__) # ─── Paths ──────────────────────────────────────────────────────────────────── @@ -44,7 +48,8 @@ def _beep_thread(freq, dur): if HAS_SOUND: try: winsound.Beep(freq, dur) - except Exception: + except (OSError, RuntimeError, ValueError): + # winsound.Beep can raise on invalid frequency or audio subsystem absence. pass def play_sound(kind: str): @@ -114,16 +119,22 @@ def load_profiles(): try: with open(PROFILES_PATH, "r") as f: return json.load(f) - except Exception: + except (OSError, json.JSONDecodeError, UnicodeDecodeError): + # Corrupt or unreadable file → fall back to defaults. pass return dict(DEFAULT_PROFILES) + +@narrow_excepts(OSError, TypeError) def save_profiles(profiles): - try: - with open(PROFILES_PATH, "w") as f: - json.dump(profiles, f, indent=2) - except Exception: - pass + """Persist dice profiles JSON; swallow write errors silently. + + Caught: ``OSError`` (disk full, permission), ``TypeError`` (non- + serializable value slipped in). Profiles persistence is best-effort + and must not crash the UI thread. + """ + with open(PROFILES_PATH, "w") as f: + json.dump(profiles, f, indent=2) # ─── Journal persistence ───────────────────────────────────────────────────── @@ -133,16 +144,21 @@ def load_journal(): try: with open(JOURNAL_PATH, "r") as f: return json.load(f) - except Exception: + except (OSError, json.JSONDecodeError, UnicodeDecodeError): + # Corrupt or unreadable file → return empty history. pass return [] + +@narrow_excepts(OSError, TypeError) def save_journal(entries): - try: - with open(JOURNAL_PATH, "w") as f: - json.dump(entries[-500:], f, indent=2) - except Exception: - pass + """Persist decision journal JSON; swallow write errors silently. + + Same policy as :func:`save_profiles` — best-effort persistence that + must not crash the UI. 
+ """ + with open(JOURNAL_PATH, "w") as f: + json.dump(entries[-500:], f, indent=2) # ─── 3D Dice Renderer ──────────────────────────────────────────────────────── @@ -1105,7 +1121,8 @@ def run_tool(): root.protocol("WM_DELETE_WINDOW", app.force_stop) if tk._default_root is None: root.mainloop() - except Exception as e: + except Exception as e: # noqa: BLE001 - boundary: surface any startup fault to UI + log.exception("Decision Dice startup failed") from tkinter import messagebox messagebox.showerror("Decision Dice", f"Startup error:\n{e}") diff --git a/tools/ffmpeg_studio.py b/tools/ffmpeg_studio.py index c2ef29a..2675f6e 100644 --- a/tools/ffmpeg_studio.py +++ b/tools/ffmpeg_studio.py @@ -15,11 +15,68 @@ import threading import time import tkinter as tk -import customtkinter as ctk +from pathlib import Path from tkinter import filedialog, messagebox, scrolledtext +import customtkinter as ctk + +from tools._common.config import get_path +from tools._common.exceptions import narrow_excepts +from tools._common.logging import get_logger + + +class OutputDirError(ValueError): + """Raised when an output directory fails containment validation.""" + + +def _validate_output_dir(raw, allowed_bases=None): + """Validate a user-supplied output directory. + + Blocks the two defenses the attack needs: traversal segments (``..``) + and relative paths. If ``allowed_bases`` is given (tests / PoCs), the + resolved path must sit under one of them. + + Returns the resolved :class:`pathlib.Path`. Raises + :class:`OutputDirError` on any violation — callers should catch it + and surface a user-friendly error (messagebox). + """ + if raw is None or not str(raw).strip(): + raise OutputDirError("Output folder is empty.") + + raw_str = str(raw).strip() + + if ".." in Path(raw_str).parts: + raise OutputDirError( + "Output folder cannot contain '..' path segments." 
+ ) + + expanded = Path(raw_str).expanduser() + if not expanded.is_absolute(): + raise OutputDirError("Output folder must be an absolute path.") + + try: + candidate = expanded.resolve() + except (OSError, RuntimeError) as e: + raise OutputDirError(f"Cannot resolve output folder: {e}") from e + + if allowed_bases: + bases = [Path(b).expanduser().resolve() for b in allowed_bases] + for base in bases: + try: + candidate.relative_to(base) + return candidate + except ValueError: + continue + raise OutputDirError( + "Output folder must be inside one of: " + + ", ".join(str(b) for b in bases) + ) + + return candidate + TOOL_NAME = "FFmpeg Studio" TOOL_DESC = "Record gameplay and convert videos using FFmpeg" +log = get_logger(__name__) # ── Presets ─────────────────────────────────────────────────────────────────── PRESETS = { @@ -172,19 +229,24 @@ def _guess_ratio(w, h): ] # ── FFmpeg capability detection ─────────────────────────────────────────────── +@narrow_excepts( + OSError, subprocess.SubprocessError, subprocess.TimeoutExpired, + default=False, +) def _test_encoder(encoder_name, pix_fmt="yuv420p"): - """Actually run a 0.1-second dummy encode to confirm the encoder works.""" - try: - r = subprocess.run( - ["ffmpeg", "-f", "lavfi", "-i", "nullsrc=s=320x240", - "-t", "0.1", "-vcodec", encoder_name, - "-pix_fmt", pix_fmt, "-f", "null", "-"], - capture_output=True, timeout=10, - creationflags=subprocess.CREATE_NO_WINDOW, - ) - return r.returncode == 0 - except Exception: - return False + """Actually run a 0.1-second dummy encode to confirm the encoder works. + + Returns ``False`` on any subprocess/OS failure (caught by the + decorator) so capability probes never raise into the UI thread. 
+ """ + r = subprocess.run( + ["ffmpeg", "-f", "lavfi", "-i", "nullsrc=s=320x240", + "-t", "0.1", "-vcodec", encoder_name, + "-pix_fmt", pix_fmt, "-f", "null", "-"], + capture_output=True, timeout=10, + creationflags=subprocess.CREATE_NO_WINDOW, + ) + return r.returncode == 0 def _probe_hardware(): """ @@ -198,7 +260,7 @@ def _probe_hardware(): r = subprocess.run(["ffmpeg", "-encoders"], capture_output=True, text=True, creationflags=subprocess.CREATE_NO_WINDOW) listed = r.stdout + r.stderr - except Exception: + except (OSError, subprocess.SubprocessError): listed = "" gpu_options = [] @@ -250,27 +312,30 @@ def _probe_hardware(): return gpu_options, gpu_label_to_key, hevc_ok, best_gpu_key +@narrow_excepts(OSError, subprocess.SubprocessError, default=[]) def _detect_audio_devices(): - try: - r = subprocess.run( - ["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"], - capture_output=True, text=True, - creationflags=subprocess.CREATE_NO_WINDOW, - ) - devices, in_audio = [], False - for line in r.stderr.splitlines(): - if "DirectShow audio devices" in line: - in_audio = True - continue - if "DirectShow video devices" in line: - in_audio = False - if in_audio: - m = re.search(r'"([^"]+)"', line) - if m and "Alternative name" not in line: - devices.append(m.group(1)) - return devices or [] - except Exception: - return [] + """Enumerate DShow audio input devices via ``ffmpeg -list_devices``. + + Returns an empty list on subprocess/OS failure (caught by the + decorator). 
+ """ + r = subprocess.run( + ["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"], + capture_output=True, text=True, + creationflags=subprocess.CREATE_NO_WINDOW, + ) + devices, in_audio = [], False + for line in r.stderr.splitlines(): + if "DirectShow audio devices" in line: + in_audio = True + continue + if "DirectShow video devices" in line: + in_audio = False + if in_audio: + m = re.search(r'"([^"]+)"', line) + if m and "Alternative name" not in line: + devices.append(m.group(1)) + return devices or [] def _gpu_key_from_label(label): l = label.lower() @@ -347,7 +412,8 @@ def run_tool(): if gpu_label_to_key.get(g) == best_gpu_key), gpu_options[0]) # ── variables ──────────────────────────────────────────────────────────── - output_dir_var = ctk.StringVar(value=os.path.expanduser("~/Videos")) + output_dir_var = ctk.StringVar(value=str(get_path( + "AUTOMATIONS_FFMPEG_OUTPUT_DIR", default=Path.home() / "Videos"))) fps_var = ctk.IntVar(value=60) crf_var = ctk.IntVar(value=23) resolution_var = ctk.StringVar(value="Native (your screen)") @@ -471,13 +537,13 @@ def _show_ffmpeg_log(log_path, title="FFmpeg Error"): try: with open(log_path, "r", encoding="utf-8", errors="replace") as f: content = f.read() - except Exception: + except OSError: content = "(could not read log file)" # Clean up the temp log file now that we have read it try: os.remove(log_path) - except Exception: + except OSError: pass log_win = ctk.CTkToplevel(win) @@ -499,9 +565,10 @@ def _show_ffmpeg_log(log_path, title="FFmpeg Error"): def start_recording(): nonlocal recording_proc, record_start_time, timer_running - out_dir = output_dir_var.get().strip() - if not out_dir: - messagebox.showwarning("No output folder", "Please select an output folder.", parent=win) + try: + out_dir = str(_validate_output_dir(output_dir_var.get())) + except OutputDirError as e: + messagebox.showerror("Invalid output folder", str(e), parent=win) return # Warn about known-unsupported combinations before starting 
@@ -560,7 +627,7 @@ def start_recording(): messagebox.showerror("FFmpeg not found", "FFmpeg was not found. Make sure it is installed and in your PATH.", parent=win) return - except Exception as e: + except (OSError, subprocess.SubprocessError, ValueError) as e: messagebox.showerror("Error", str(e), parent=win) return finally: @@ -568,7 +635,7 @@ def start_recording(): if log_file and recording_proc is None: try: log_file.close() - except Exception: + except OSError: pass # Disable controls immediately while we verify startup @@ -586,13 +653,13 @@ def _verify_startup(): try: log_file.flush() log_file.close() - except Exception: + except OSError: pass # Remove 0-byte output file if it exists try: if os.path.exists(out_file) and os.path.getsize(out_file) == 0: os.remove(out_file) - except Exception: + except OSError: pass def _on_fail(): @@ -634,20 +701,20 @@ def stop_recording(): recording_proc.stdin.write(b"q") recording_proc.stdin.flush() recording_proc.wait(timeout=15) - except Exception: + except (OSError, BrokenPipeError, subprocess.TimeoutExpired): recording_proc.kill() # Close the log file handle and clean up temp log lf = getattr(recording_proc, "_log_file", None) if lf: try: lf.close() - except Exception: + except OSError: pass lp = getattr(recording_proc, "_log_path", None) if lp: try: os.remove(lp) - except Exception: + except OSError: pass recording_proc = None start_rec_btn.configure(state="normal") @@ -659,7 +726,7 @@ def _set_lock(state): for w in _lockable_widgets: try: w.configure(state=state) - except Exception: + except tk.TclError: pass # ════════════════════════════════════════════════════════════════════════ @@ -897,7 +964,8 @@ def _label(parent, text, width=145): cap_scroll.pack(fill="both", expand=True) cp = cap_scroll - cap_out_var = ctk.StringVar(value=os.path.expanduser("~/Pictures")) + cap_out_var = ctk.StringVar(value=str(get_path( + "AUTOMATIONS_FFMPEG_CAPTURE_DIR", default=Path.home() / "Pictures"))) cap_fmt_var = 
ctk.StringVar(value="PNG (lossless — best quality)") cap_quality_var = ctk.IntVar(value=90) cap_delay_var = ctk.IntVar(value=0) @@ -1067,7 +1135,7 @@ def _show_visual_overlay(sw, sh, bg_path): bg_img = tk.PhotoImage(file=bg_path) cv.create_image(0, 0, anchor="nw", image=bg_img, tags="bg") cv._bg_ref = bg_img # prevent garbage-collection - except Exception: + except (OSError, tk.TclError): pass # fallback: plain black canvas # ── Initial full-screen dim (stipple = 50 % grey dots over screenshot) ── @@ -1195,7 +1263,7 @@ def on_release(e): ex, ey = _apply_snap(state["sx"], state["sy"], e.x, e.y, bool(e.state & 0x1)) try: ov.destroy() - except Exception: + except tk.TclError: pass win.deiconify() win.lift() @@ -1206,18 +1274,18 @@ def on_release(e): win.after(100, _update_region_display) try: os.remove(bg_path) - except Exception: + except OSError: pass def on_escape(e): try: ov.destroy() - except Exception: + except tk.TclError: pass win.deiconify() try: os.remove(bg_path) - except Exception: + except OSError: pass cv.bind("", on_press) @@ -1282,7 +1350,11 @@ def on_escape(e): cap_btn_row.pack(pady=(4, 6)) def _do_capture(): - out_d = cap_out_var.get().strip() + try: + out_d = str(_validate_output_dir(cap_out_var.get())) + except OutputDirError as e: + messagebox.showerror("Invalid output folder", str(e), parent=win) + return fmt_key = cap_fmt_var.get() ext, _ = CAP_FORMATS[fmt_key] scale = RESOLUTIONS.get(cap_res_var.get()) @@ -1341,7 +1413,7 @@ def _do_capture(): cap_result_label.configure(text=e, text_color="#FF6B6B"), cap_btn.configure(state="normal", text="📷 Capture Screenshot"), )) - except Exception as e: + except (OSError, subprocess.SubprocessError, ValueError) as e: win.after(0, lambda: ( set_status(f"Error: {e}", "red"), cap_btn.configure(state="normal", text="📷 Capture Screenshot"), @@ -1434,7 +1506,8 @@ def _countdown(remaining): } input_file_var = ctk.StringVar() - conv_out_var = ctk.StringVar(value=os.path.expanduser("~/Videos")) + conv_out_var = 
ctk.StringVar(value=str(get_path( + "AUTOMATIONS_FFMPEG_CONVERT_DIR", default=Path.home() / "Videos"))) conv_fmt_var = ctk.StringVar(value="mp4") conv_codec_var = ctk.StringVar(value="libx264") conv_crf_var = ctk.IntVar(value=18) @@ -1560,7 +1633,7 @@ def run(): creationflags=subprocess.CREATE_NO_WINDOW) ok = proc.returncode == 0 err = proc.stderr.decode("utf-8", errors="replace") if not ok else "" - except Exception as e: + except (OSError, subprocess.SubprocessError, ValueError) as e: ok, err = False, str(e) def done(): diff --git a/tools/folder_size_analyzer.py b/tools/folder_size_analyzer.py index 710ad37..49d5d25 100644 --- a/tools/folder_size_analyzer.py +++ b/tools/folder_size_analyzer.py @@ -28,9 +28,12 @@ import customtkinter as ctk import psutil +from tools._common.exceptions import narrow_excepts +from tools._common.logging import get_logger from tools._common.threadsafe import BoundedDeque TOOL_NAME = "Folder Size Analyzer Pro" +log = get_logger(__name__) # ============================= # Utilities @@ -97,17 +100,24 @@ def get_folder_size(folder_path: str, progress_callback=None) -> Tuple[int, int, return total_size, file_count, created_time, modified_time, accessed_time +@narrow_excepts( + OSError, + PermissionError, + default={'total': 0, 'used': 0, 'free': 0}, +) def get_drive_info(path: str) -> Dict[str, int]: - """Get drive information (total, used, free space)""" - try: - stat = psutil.disk_usage(path) - return { - 'total': stat.total, - 'used': stat.used, - 'free': stat.free - } - except: - return {'total': 0, 'used': 0, 'free': 0} + """Get drive information (total, used, free space). + + Returns a zeroed dict on missing path or permission denied (caught by + the decorator). ``psutil.disk_usage`` raises ``OSError`` for invalid + paths and ``PermissionError`` for protected mount points. 
+ """ + stat = psutil.disk_usage(path) + return { + 'total': stat.total, + 'used': stat.used, + 'free': stat.free, + } # ============================= # Data Models @@ -367,7 +377,7 @@ def _scan_directory(self): self.after(0, lambda data=folder_data: self._after_scan(data)) - except Exception as e: + except (OSError, PermissionError, TypeError) as e: self.after(0, lambda: self._after_scan([])) self.after(0, lambda err=e: messagebox.showerror("Scan Error", f"Error scanning directory:\n{str(err)}")) @@ -528,8 +538,8 @@ def _export_results(self): self._export_csv(file_path) messagebox.showinfo("Export Successful", f"Results exported to:\n{file_path}") - - except Exception as e: + + except (OSError, TypeError, ValueError) as e: messagebox.showerror("Export Error", f"Error exporting results:\n{str(e)}") def _export_csv(self, file_path: str): @@ -613,8 +623,9 @@ def run_tool(): root.focus_force() root.attributes("-topmost", True) root.after(250, lambda: root.attributes("-topmost", False)) - - except Exception as e: + + except Exception as e: # noqa: BLE001 - boundary: surface any startup fault to UI + log.exception("Folder Size Analyzer Pro startup failed") messagebox.showerror("Folder Size Analyzer Pro", f"Startup error:\n{e}") if __name__ == "__main__": diff --git a/tools/network_pattern_analyzer.py b/tools/network_pattern_analyzer.py index 903af0f..e4ae47d 100644 --- a/tools/network_pattern_analyzer.py +++ b/tools/network_pattern_analyzer.py @@ -22,7 +22,11 @@ from tkinter import ttk, messagebox, filedialog import customtkinter as ctk +from tools._common.exceptions import narrow_excepts +from tools._common.logging import get_logger + TOOL_NAME = "Network Pattern Analyzer" +log = get_logger(__name__) class NetworkPatternAnalyzer: def __init__(self): @@ -102,7 +106,7 @@ def load_exports(self): self.export_data.append(data) incidents_count += len(data.get('incidents', [])) events_count += len(data.get('events', [])) - except Exception as e: + except (OSError, 
json.JSONDecodeError, UnicodeDecodeError) as e: print(f"Error loading {file_path}: {e}") self.status_label.configure( @@ -120,8 +124,8 @@ def load_exports(self): self.results_text.insert(tk.END, f"\nClick 'Analyze' to detect patterns...") self.results_text.configure(state="disabled") - - except Exception as e: + + except (OSError, tk.TclError) as e: messagebox.showerror("Load Error", f"Error loading export files:\n{e}") self.status_label.configure(text="Error loading files") @@ -157,8 +161,8 @@ def analyze_patterns(self): # Display results self._display_results() self.status_label.configure(text="Analysis complete!") - - except Exception as e: + + except (KeyError, ValueError, TypeError, AttributeError) as e: messagebox.showerror("Analysis Error", f"Error during analysis:\n{e}") self.status_label.configure(text="Analysis failed") @@ -375,17 +379,20 @@ def export_analysis(self): messagebox.showinfo("Export Complete", f"Analysis results saved to:\n{file_path}") self.status_label.configure(text="Analysis exported successfully") - - except Exception as e: + + except (OSError, TypeError) as e: messagebox.showerror("Export Error", f"Error exporting analysis:\n{e}") # Helper methods + @narrow_excepts(ValueError, TypeError, default=None) def _parse_timestamp(self, timestamp_str: str) -> datetime: - """Parse timestamp string to datetime object""" - try: - return datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S") - except Exception: - return None + """Parse timestamp string to datetime object. + + Returns ``None`` on malformed or non-string input (caught by the + decorator). ``ValueError`` covers format mismatches; ``TypeError`` + covers non-string values slipping through loose JSON input. 
+ """ + return datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S") def _calculate_duration_seconds(self, start_time: str, end_time: str) -> float: """Calculate duration in seconds between two timestamps""" @@ -426,7 +433,8 @@ def run_tool(): try: app = NetworkPatternAnalyzer() app.run() - except Exception as e: + except Exception as e: # noqa: BLE001 - boundary: surface any startup fault to UI + log.exception("Network Pattern Analyzer startup failed") messagebox.showerror("Network Pattern Analyzer", f"Startup error:\n{e}") if __name__ == "__main__": diff --git a/portable/network_stability_monitor.py b/tools/network_stability_monitor.py similarity index 98% rename from portable/network_stability_monitor.py rename to tools/network_stability_monitor.py index bbcfdea..34081d8 100644 --- a/portable/network_stability_monitor.py +++ b/tools/network_stability_monitor.py @@ -27,6 +27,9 @@ from datetime import datetime from typing import Dict, List, Optional, Tuple, Any +from tools._common.config import get_bool, get_config +from tools._common.threadsafe import BoundedDeque + import tkinter as tk from tkinter import ttk, messagebox, filedialog @@ -65,9 +68,10 @@ # ========================= # Auto Export Configuration # ========================= -AUTO_EXPORT_ENABLED = True -AUTO_EXPORT_TIME = "23:30" # HH:MM -EXPORT_FOLDER = "exports" +# Overridable via env or /.env — see .env.example. 
+AUTO_EXPORT_ENABLED = get_bool("AUTOMATIONS_NSM_AUTO_EXPORT", default=True) +AUTO_EXPORT_TIME = get_config("AUTOMATIONS_NSM_EXPORT_TIME", default="23:30") +EXPORT_FOLDER = get_config("AUTOMATIONS_NSM_EXPORT_DIR", default="exports") # ========================= # Helpers @@ -441,9 +445,9 @@ class Incident: class NetworkStabilityEngine: def __init__(self, state_path: str): self.state_path = state_path - self.samples: List[Sample] = [] - self.events: List[Event] = [] - self.incidents: List[Incident] = [] + self.samples: BoundedDeque = BoundedDeque(maxlen=3000) + self.events: BoundedDeque = BoundedDeque(maxlen=2000) + self.incidents: BoundedDeque = BoundedDeque(maxlen=1000) self.baseline_gateway: str = "" self.baseline_dns: List[str] = [] @@ -618,14 +622,12 @@ def _db_load_incidents(self, limit: int = 500, return [] def log_event(self, severity: str, category: str, title: str, details: Dict): + # BoundedDeque enforces maxlen=2000 automatically. self.events.append(Event(now_ts(), severity, category, title, details)) - if len(self.events) > 2000: - self.events = self.events[-1600:] def add_sample(self, s: Sample): + # BoundedDeque enforces maxlen=3000 automatically. 
self.samples.append(s) - if len(self.samples) > 3000: - self.samples = self.samples[-2400:] def set_baseline(self, gateway: str, dns_servers: List[str], local_ip: str = ""): self.baseline_gateway = gateway @@ -927,7 +929,7 @@ def on_state_update(self, status: str, category: str, severity: str, reason: str del self.active_incidents[(cat, rsn)] def _find_incident(self, incident_id: int) -> Optional[Incident]: - for inc in reversed(self.incidents): + for inc in reversed(self.incidents.snapshot()): if inc.id == incident_id: return inc return None @@ -988,26 +990,28 @@ def run_auto_export(self, folder_var): def _prepare_export_data(self, current_time: datetime) -> Dict[str, Any]: """Prepare data for export""" # Filter incidents since last export + incidents_snap = self.incidents.snapshot() incidents_to_export = [] if self.last_export_timestamp: # Only include incidents after last export - for inc in self.incidents: + for inc in incidents_snap: inc_start = parse_ts(inc.start_time) if inc_start and inc_start >= self.last_export_timestamp: incidents_to_export.append(asdict(inc)) else: # Include all incidents if no previous export - incidents_to_export = [asdict(inc) for inc in self.incidents] + incidents_to_export = [asdict(inc) for inc in incidents_snap] - # Filter events since last export + # Filter events since last export — snapshot once, iterate the copy. 
+ events_snap = self.events.snapshot() events_to_export = [] if self.last_export_timestamp: - for event in self.events: + for event in events_snap: event_time = parse_ts(event.timestamp) if event_time and event_time >= self.last_export_timestamp: events_to_export.append(asdict(event)) else: - events_to_export = [asdict(event) for event in self.events] + events_to_export = [asdict(event) for event in events_snap] # Calculate statistics stats = self._calculate_export_statistics(incidents_to_export) @@ -1088,7 +1092,7 @@ def get_rolling_stats(self) -> Dict[str, Any]: def _get_dns_fail_rate(self) -> float: """Calculate DNS failure rate from recent samples""" - recent_samples = [s for s in self.samples[-30:] if s.dns_state in ['OK', 'FAIL', 'SLOW']] + recent_samples = [s for s in self.samples.snapshot()[-30:] if s.dns_state in ['OK', 'FAIL', 'SLOW']] if not recent_samples: return 0.0 fails = sum(1 for s in recent_samples if s.dns_state == 'FAIL') @@ -1149,13 +1153,14 @@ def enhance_sample_with_intelligence(self, sample: Sample) -> Sample: def generate_ai_export(self) -> Dict[str, Any]: """Generate AI-friendly export data""" - if not self.samples: + samples_snap = self.samples.snapshot() + if not samples_snap: return {} - - current_sample = self.samples[-1] + + current_sample = samples_snap[-1] rolling_stats = self.get_rolling_stats() - incidents_data = [asdict(inc) for inc in self.incidents] - + incidents_data = [asdict(inc) for inc in self.incidents.snapshot()] + # Handle missing intelligence engine gracefully if self.intelligence: try: @@ -2190,9 +2195,9 @@ def export_report(self): "dns_servers": self.engine.baseline_dns, }, "last_sample": asdict(self._last_sample) if self._last_sample else None, - "incidents": [asdict(i) for i in self.engine.incidents], - "events": [asdict(e) for e in self.engine.events], - "samples_tail": [asdict(s) for s in self.engine.samples[-500:]], + "incidents": [asdict(i) for i in self.engine.incidents.snapshot()], + "events": [asdict(e) for 
e in self.engine.events.snapshot()], + "samples_tail": [asdict(s) for s in self.engine.samples.snapshot()[-500:]], } try: @@ -2385,7 +2390,7 @@ def _do_sample(self, params: Dict[str, Any]): "Monitoring Status Update", { "samples_collected": sample_count, - "active_incidents": len([i for i in self.engine.incidents if not i.end_time]), + "active_incidents": len([i for i in self.engine.incidents.snapshot() if not i.end_time]), "total_incidents": incident_count, "total_events": event_count, "current_status": status, @@ -2595,7 +2600,7 @@ def refresh_incidents(self): fc = self.filter_category.get() fs = self.filter_severity.get() - incidents = list(self.engine.incidents) + incidents = list(self.engine.incidents.snapshot()) # newest first incidents.reverse() @@ -2641,7 +2646,7 @@ def show_incident_details(self): return inc = None - for x in self.engine.incidents: + for x in self.engine.incidents.snapshot(): if x.id == inc_id: inc = x break @@ -2816,7 +2821,7 @@ def refresh_events(self): fc = self.filter_category.get() fs = self.filter_severity.get() - events = list(self.engine.events) + events = list(self.engine.events.snapshot()) events.reverse() for idx, e in enumerate(events[:1200]): @@ -2841,7 +2846,7 @@ def show_event_details(self): return iid = sel[0] idx = int(iid.split("-")[1]) - recent = list(reversed(self.engine.events[-1600:])) + recent = list(reversed(self.engine.events.snapshot()[-1600:])) if idx < 0 or idx >= len(recent): return e = recent[idx] @@ -3016,7 +3021,7 @@ def _update_live_chart(self): return cutoff = time.time() - 300 - recent = self._samples_to_ts(self.engine.samples) + recent = self._samples_to_ts(self.engine.samples.snapshot()) recent = [(t, s) for t, s in recent if t >= cutoff] series = [ @@ -3053,7 +3058,7 @@ def _draw_incident_graph(self, inc): start_f = start_dt.timestamp() - 30 end_f = end_dt.timestamp() + 30 - all_ts = self._samples_to_ts(self.engine.samples) + all_ts = self._samples_to_ts(self.engine.samples.snapshot()) incident_data = 
[(t, s) for t, s in all_ts if start_f <= t <= end_f] if not incident_data: diff --git a/tools/screen_lock.py b/tools/screen_lock.py index 1f73a30..e2b1ed7 100644 --- a/tools/screen_lock.py +++ b/tools/screen_lock.py @@ -865,21 +865,19 @@ def _on_click(self, event): # Key combos we actively swallow while the lock is up. # Critical combos: must all be blocked or lock is unsafe. Fail-fast on any failure. + # NOTE: block_key() only accepts single key names, not combo strings like "alt+tab". + # Combos are already suppressed by the suppress=True hook; only single keys go here. _CRITICAL_COMBOS = ( - "alt+tab", - "alt+f4", - "ctrl+shift+esc", "left windows", "right windows", ) - # Nice-to-have combos: best-effort. If these fail, lock still works. + # Nice-to-have combos: best-effort via block_key (single keys only) or rely on suppress hook. + # If these fail, lock still works because the suppress=True hook intercepts everything. _EXTRA_COMBOS = ( "ctrl+esc", # opens Start menu "alt+esc", # cycles windows - "win", # redundant on most keyboard library versions (covered by "left windows"/"right windows") - # but kept as best-effort — some versions accept it, some don't. The two " windows" - # entries already cover both physical meta keys, so removal of "win" does not introduce a bypass. 
+ "win", # redundant with left/right windows but harmless to try ) def _install_hook(self) -> bool: diff --git a/tools/security_audit.py b/tools/security_audit.py index 01f9c5c..656abc2 100644 --- a/tools/security_audit.py +++ b/tools/security_audit.py @@ -31,8 +31,12 @@ import tkinter as tk from tkinter import ttk +from tools._common.exceptions import narrow_excepts +from tools._common.logging import get_logger + TOOL_NAME = "Security Audit" TOOL_DESCRIPTION = "Comprehensive security audit — checks startup, processes, ports, files, DNS, accounts, Wi-Fi, USB, browser, event logs" +log = get_logger(__name__) _CREATE_NO_WINDOW = 0x08000000 @@ -49,21 +53,25 @@ def safe_run(cmd: List[str], timeout: int = 30) -> Tuple[int, str, str]: return cp.returncode, cp.stdout, cp.stderr except subprocess.TimeoutExpired: return 1, "", "timeout" - except Exception as e: + except (OSError, subprocess.SubprocessError) as e: return 1, "", str(e) +@narrow_excepts(AttributeError, OSError, default=False) def is_admin() -> bool: - try: - import ctypes - return ctypes.windll.shell32.IsUserAnAdmin() != 0 - except Exception: - return False + """Return True when running with Windows admin rights. + Caught: ``AttributeError`` (non-Windows: ``ctypes.windll`` missing), + ``OSError`` (ctypes shell32 call failure). Non-admin falls through + the decorator with ``default=False``. 
+ """ + import ctypes + return ctypes.windll.shell32.IsUserAnAdmin() != 0 + + +@narrow_excepts(OSError, ValueError, default=9999) def _age_days(path: str) -> float: - try: - return (time.time() - os.path.getmtime(path)) / 86400 - except Exception: - return 9999 + """Return file age in days; 9999 sentinel on stat failure.""" + return (time.time() - os.path.getmtime(path)) / 86400 SAFE_PROCESS_PATHS = [ "\\microsoft\\", "\\windows defender\\", "\\windows\\system32\\", @@ -185,14 +193,14 @@ def _load_state(self) -> dict: try: with open(self.state_path, "r") as f: return json.load(f) - except Exception: + except (OSError, json.JSONDecodeError, UnicodeDecodeError): return {"baseline": None, "last_scan": None, "last_findings": []} def save_state(self): try: with open(self.state_path, "w") as f: json.dump(self.state, f, indent=2, default=str) - except Exception: + except (OSError, TypeError): pass def save_baseline(self, findings: List[Finding]): @@ -346,9 +354,9 @@ def check_startup(self) -> List[Finding]: suspicious_tasks.append((task_name, task_run, author)) else: normal_task_count += 1 - except Exception: + except (KeyError, ValueError, AttributeError): continue - except Exception: + except (OSError, subprocess.SubprocessError, csv.Error): findings.append(Finding("startup", "INFO", "Could not enumerate scheduled tasks", "schtasks command failed")) @@ -421,7 +429,7 @@ def check_processes(self) -> List[Finding]: f"Unsigned executable: {os.path.basename(exe)}", f"Path: {exe}\nDigital signature: Not signed", "Unsigned executables from unusual locations should be investigated.")) - except Exception: + except (OSError, TypeError, AttributeError): pass except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): @@ -454,7 +462,7 @@ def check_ports_firewall(self) -> List[Finding]: pname = "" try: pname = psutil.Process(pid).name() if pid else "unknown" - except Exception: + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess, OSError): pname = 
f"PID {pid}" listeners.append((port, addr, pname, pid)) @@ -509,7 +517,7 @@ def check_ports_firewall(self) -> List[Finding]: "RDP allows remote access to this computer.\n" "If you don't use RDP, this is a security risk.", "Disable RDP: Settings → System → Remote Desktop → Off")) - except Exception: + except (OSError, FileNotFoundError): pass # Firewall rules (inbound allow from any) @@ -552,7 +560,7 @@ def check_ports_firewall(self) -> List[Finding]: f"Top apps accepting inbound connections:\n{top_apps}" + (f"\n ... and {len(rule_counts)-10} more" if len(rule_counts) > 10 else ""), "Normal for dev/gaming PCs. Review unused apps in Windows Firewall.")) - except Exception: + except (OSError, subprocess.SubprocessError, ValueError): pass return findings @@ -592,7 +600,7 @@ def check_filesystem(self) -> List[Finding]: for f in files: try: ext = os.path.splitext(f)[1].lower() - except Exception: + except (TypeError, AttributeError): continue if ext in exec_exts: try: @@ -600,7 +608,7 @@ def check_filesystem(self) -> List[Finding]: # Ensure safe string representation fp_safe = fp.encode("utf-8", errors="replace").decode("utf-8") f_safe = f.encode("utf-8", errors="replace").decode("utf-8") - except Exception: + except (AttributeError, UnicodeError): continue age = _age_days(fp) if age < 7: @@ -757,7 +765,7 @@ def check_dns_network(self) -> List[Finding]: findings.append(Finding("dns", "INFO", "No system proxy configured", "ProxyEnable = 0 (good)")) winreg.CloseKey(key) - except Exception: + except (OSError, FileNotFoundError): pass # DNS resolution verification @@ -1119,7 +1127,7 @@ def check_browser(self) -> List[Finding]: exp_date = datetime.strptime(notafter, "%Y-%m-%d") if exp_date < datetime.now(): expired_certs.append(f"{subject[:70]} (expired {notafter})") - except Exception: + except (ValueError, TypeError): pass cert_lower = subject.lower() @@ -1161,7 +1169,7 @@ def check_browser(self) -> List[Finding]: else: findings.append(Finding("browser", "INFO", "No unknown 
root certificates", "All CAs are recognized.")) - except Exception: + except (OSError, subprocess.SubprocessError, ValueError): findings.append(Finding("browser", "INFO", "Could not check certificate store", "PowerShell command failed.")) @@ -1181,7 +1189,7 @@ def check_browser(self) -> List[Finding]: except FileNotFoundError: pass winreg.CloseKey(key) - except Exception: + except OSError: pass if not findings: @@ -1257,7 +1265,7 @@ def check_event_logs(self) -> List[Finding]: f"{desc}: {count} events", f"{count} {desc.lower()} events found.")) - except Exception: + except (OSError, subprocess.SubprocessError, ValueError): continue if not admin: @@ -1504,7 +1512,8 @@ def _scan_worker(self): cat_key = futures[future] try: findings = future.result() - except Exception as e: + except Exception as e: # noqa: BLE001 - boundary: any check may raise, we must still report + log.exception("Category check %s failed", cat_key) findings = [Finding(cat_key, "INFO", "Check failed", str(e))] self.result_q.put(("category_done", cat_key, findings)) @@ -1513,7 +1522,8 @@ def _scan_worker(self): def _safe_check(self, method): try: return method() - except Exception as e: + except Exception as e: # noqa: BLE001 - boundary: safe wrapper for worker-thread checks + log.exception("Safe-check wrapper caught fault in %s", getattr(method, "__qualname__", method)) return [Finding("unknown", "INFO", "Check error", str(e))] def _ui_tick(self): @@ -1811,7 +1821,7 @@ def export_report(self): with open(filepath, "w") as f: json.dump(report, f, indent=2, default=str) self.status_label.configure(text=f"Exported: {filepath}") - except Exception as e: + except (OSError, TypeError, ValueError) as e: self.status_label.configure(text=f"Export failed: {e}") def destroy(self): diff --git a/tools/system_cleaner.py b/tools/system_cleaner.py index a439749..8aeb604 100644 --- a/tools/system_cleaner.py +++ b/tools/system_cleaner.py @@ -39,13 +39,17 @@ try: # send2trash is the default (safe) delete path; 
optional at import time from send2trash import send2trash as _send2trash # type: ignore _HAS_SEND2TRASH = True -except Exception: +except ImportError: _send2trash = None # type: ignore _HAS_SEND2TRASH = False +from tools._common.exceptions import narrow_excepts +from tools._common.logging import get_logger + # ────────────────────────────────────────────── TOOL_NAME = "System Cleaner Pro" TOOL_DESC = "Safely free disk space and optimise RAM — preview sizes before deleting" +log = get_logger(__name__) _CREATE_NO_WINDOW = 0x08000000 @@ -75,11 +79,31 @@ def _log_struct(event: str, **fields) -> None: try: payload = {"event": event, **fields} print(json.dumps(payload, default=str), file=sys.stderr) - except Exception: + except (OSError, TypeError, ValueError): # Never let logging raise. pass +def _tree_has_symlink(root_path: str) -> Optional[str]: + """Return the first symlink found anywhere under ``root_path``, or None. + + Uses ``os.walk(followlinks=False)`` so the walk never crosses a symlink. + We check both directory and file entries at every level because a + symlink that points outside ``root_path`` would otherwise allow an + ``rmtree``/``send2trash`` call to affect state beyond the intended + target. Returns the absolute path of the first offender for logging. 
+ """ + try: + for dirpath, dirnames, filenames in os.walk(root_path, followlinks=False): + for name in dirnames + filenames: + full = os.path.join(dirpath, name) + if os.path.islink(full): + return full + except OSError: + pass + return None + + # ────────────────────────────────────────────── # ctypes struct for Recycle Bin query # ────────────────────────────────────────────── @@ -94,11 +118,14 @@ class _SHQUERYRBINFO(ctypes.Structure): # ────────────────────────────────────────────── # Module-level helpers # ────────────────────────────────────────────── +@narrow_excepts(AttributeError, OSError, default=False) def is_admin() -> bool: - try: - return bool(ctypes.windll.shell32.IsUserAnAdmin()) - except Exception: - return False + """Return True when the current process has Windows admin rights. + + Caught: ``AttributeError`` (``ctypes.windll`` absent on non-Windows), + ``OSError`` (shell32 call failure). + """ + return bool(ctypes.windll.shell32.IsUserAnAdmin()) def format_size(n: int) -> str: @@ -120,7 +147,7 @@ def _dir_size(path: str) -> int: total += os.path.getsize(os.path.join(root, f)) except OSError: pass - except Exception: + except OSError: pass return total @@ -132,8 +159,13 @@ def _delete_dir_contents( ) -> int: """Delete everything inside ``path`` (not the folder itself). - Symlinks are always skipped (never followed) to avoid escaping the - target directory. Behavior branches on ``mode``: + Symlink-at-top entries are skipped (never followed). Directory entries + are additionally tree-walked via ``os.walk(followlinks=False)`` and + **refused entirely** if any symlink is present anywhere below — this + is stricter than ``rmtree``'s built-in "stop at the link" behavior and + emits a structured ``symlink_in_tree_refused`` log entry. + + Behavior branches on ``mode``: * ``DRY_RUN`` — accumulate size only, do not touch disk. * ``TRASH`` — move each entry to the recycle bin via ``send2trash``. 
@@ -185,11 +217,23 @@ def _delete_dir_contents( freed += size continue + # Refuse to touch any directory entry whose tree contains a symlink. + # rmtree/send2trash stop at the link itself, but pre-refusal gives a + # louder, auditable signal and matches the Plan A/A1 stance. + if os.path.isdir(full): + offender = _tree_has_symlink(full) + if offender is not None: + skipped += 1 + log_cb(f" Refused (symlink inside tree): {entry}") + _log_struct("symlink_in_tree_refused", path=full, symlink=offender) + continue + if mode is DeleteMode.TRASH: try: _send2trash(full) freed += size - except Exception as exc: # send2trash raises its own TrashPermissionError etc. + _log_struct("trashed", path=full, bytes=size) + except OSError as exc: # send2trash.TrashPermissionError subclasses OSError skipped += 1 _log_struct( "trash_failed", @@ -206,6 +250,7 @@ def _delete_dir_contents( else: os.remove(full) freed += size + _log_struct("permanent_deleted", path=full, bytes=size) except (OSError, PermissionError) as exc: skipped += 1 _log_struct( @@ -262,7 +307,8 @@ def _delete_glob_files( try: _send2trash(fp) freed += size - except Exception as exc: + _log_struct("trashed", path=fp, bytes=size) + except OSError as exc: # send2trash.TrashPermissionError subclasses OSError skipped += 1 _log_struct( "trash_failed", @@ -276,6 +322,7 @@ def _delete_glob_files( try: os.remove(fp) freed += size + _log_struct("permanent_deleted", path=fp, bytes=size) except (OSError, PermissionError) as exc: skipped += 1 _log_struct( @@ -301,17 +348,20 @@ def _query_recycle_bin_size() -> int: try: ctypes.windll.shell32.SHQueryRecycleBinW(None, ctypes.byref(info)) return max(0, info.i64Size) - except Exception: + except (AttributeError, OSError): + # AttributeError: ctypes.windll missing on non-Windows. OSError: shell32 call failure. 
return 0 +@narrow_excepts(psutil.Error, OSError, default=timedelta(0)) def _get_system_uptime() -> timedelta: - """Return system uptime as a timedelta.""" - try: - boot = psutil.boot_time() - return timedelta(seconds=time.time() - boot) - except Exception: - return timedelta(0) + """Return system uptime as a timedelta. + + Caught: ``psutil.Error`` (boot_time unavailable), ``OSError`` (clock read + failure). Returns ``timedelta(0)`` on failure so callers can still render. + """ + boot = psutil.boot_time() + return timedelta(seconds=time.time() - boot) def _format_uptime(td: timedelta) -> str: @@ -344,48 +394,50 @@ def _resolve_gpu_shader_paths() -> list: return [p for p in candidates if os.path.isdir(p)] +@narrow_excepts(AttributeError, OSError, default=False) def _enable_privilege(privilege_name: str) -> bool: - """Enable a Windows privilege (e.g. SeProfileSingleProcessPrivilege). Needs admin.""" - try: - TOKEN_ADJUST_PRIVILEGES = 0x0020 - TOKEN_QUERY = 0x0008 - SE_PRIVILEGE_ENABLED = 0x00000002 - - class LUID(ctypes.Structure): - _fields_ = [("LowPart", ctypes.wintypes.DWORD), - ("HighPart", ctypes.wintypes.LONG)] - - class LUID_AND_ATTRIBUTES(ctypes.Structure): - _fields_ = [("Luid", LUID), - ("Attributes", ctypes.wintypes.DWORD)] + """Enable a Windows privilege (e.g. SeProfileSingleProcessPrivilege). Needs admin. - class TOKEN_PRIVILEGES(ctypes.Structure): - _fields_ = [("PrivilegeCount", ctypes.wintypes.DWORD), - ("Privileges", LUID_AND_ATTRIBUTES * 1)] - - advapi32 = ctypes.windll.advapi32 - kernel32 = ctypes.windll.kernel32 - - token = ctypes.wintypes.HANDLE() - advapi32.OpenProcessToken( - kernel32.GetCurrentProcess(), - TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, - ctypes.byref(token), - ) + Caught: ``AttributeError`` (``ctypes.windll`` absent on non-Windows), + ``OSError`` (advapi32/kernel32 call failure). Returns ``False`` on failure. 
+ """ + TOKEN_ADJUST_PRIVILEGES = 0x0020 + TOKEN_QUERY = 0x0008 + SE_PRIVILEGE_ENABLED = 0x00000002 + + class LUID(ctypes.Structure): + _fields_ = [("LowPart", ctypes.wintypes.DWORD), + ("HighPart", ctypes.wintypes.LONG)] + + class LUID_AND_ATTRIBUTES(ctypes.Structure): + _fields_ = [("Luid", LUID), + ("Attributes", ctypes.wintypes.DWORD)] + + class TOKEN_PRIVILEGES(ctypes.Structure): + _fields_ = [("PrivilegeCount", ctypes.wintypes.DWORD), + ("Privileges", LUID_AND_ATTRIBUTES * 1)] + + advapi32 = ctypes.windll.advapi32 + kernel32 = ctypes.windll.kernel32 + + token = ctypes.wintypes.HANDLE() + advapi32.OpenProcessToken( + kernel32.GetCurrentProcess(), + TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, + ctypes.byref(token), + ) - luid = LUID() - advapi32.LookupPrivilegeValueW(None, privilege_name, ctypes.byref(luid)) + luid = LUID() + advapi32.LookupPrivilegeValueW(None, privilege_name, ctypes.byref(luid)) - tp = TOKEN_PRIVILEGES() - tp.PrivilegeCount = 1 - tp.Privileges[0].Luid = luid - tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED + tp = TOKEN_PRIVILEGES() + tp.PrivilegeCount = 1 + tp.Privileges[0].Luid = luid + tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED - advapi32.AdjustTokenPrivileges(token, False, ctypes.byref(tp), 0, None, None) - kernel32.CloseHandle(token) - return True - except Exception: - return False + advapi32.AdjustTokenPrivileges(token, False, ctypes.byref(tp), 0, None, None) + kernel32.CloseHandle(token) + return True # ────────────────────────────────────────────── @@ -405,7 +457,7 @@ class TOKEN_PRIVILEGES(ctypes.Structure): { "key": "win_temp", "label": "Windows TEMP", - "path": r"C:\Windows\Temp", + "path": os.path.expandvars(r"%SystemRoot%\Temp"), "type": "dir_contents", "admin": True, "desc": "System-wide temp files -- requires Administrator", @@ -432,7 +484,7 @@ class TOKEN_PRIVILEGES(ctypes.Structure): { "key": "prefetch", "label": "Windows Prefetch", - "path": r"C:\Windows\Prefetch", + "path": 
os.path.expandvars(r"%SystemRoot%\Prefetch"), "type": "dir_contents", "admin": True, "desc": "App launch optimisation files -- stale ones slow things down", @@ -442,7 +494,7 @@ class TOKEN_PRIVILEGES(ctypes.Structure): { "key": "win_update", "label": "Windows Update Cache", - "path": r"C:\Windows\SoftwareDistribution\Download", + "path": os.path.expandvars(r"%SystemRoot%\SoftwareDistribution\Download"), "type": "dir_contents", "admin": True, "desc": "Downloaded update files -- already installed, safe to remove", @@ -836,11 +888,13 @@ def _update_uptime(self): color = "#4caf50" self._uptime_label.configure(text=text, text_color=color) - except Exception: + except (tk.TclError, AttributeError): + # Widget destroyed during shutdown or not yet attached. pass try: self.after(30000, self._update_uptime) - except Exception: + except tk.TclError: + # Tk root gone — stop rescheduling. pass # ────────────────────────────────────────── @@ -1107,7 +1161,8 @@ def _run_category_clean(self, cat: dict, mode: DeleteMode = DEFAULT_DELETE_MODE) try: ctypes.windll.shell32.SHEmptyRecycleBinW(None, None, 0x00000007) log(" Recycle Bin emptied") - except Exception as e: + except (AttributeError, OSError) as e: + # AttributeError on non-Windows; OSError on shell32 call failure. log(f" Error: {e}") return 0 return size @@ -1177,7 +1232,8 @@ def _run_category_clean(self, cat: dict, mode: DeleteMode = DEFAULT_DELETE_MODE) if os.path.isdir(do_path): return _delete_dir_contents(do_path, log, mode) log(" No Delivery Optimisation cache found") - except Exception as e: + except (OSError, subprocess.SubprocessError) as e: + # OSError: spawn failure. SubprocessError: TimeoutExpired etc. 
log(f" Error: {e}") return 0 @@ -1202,7 +1258,8 @@ def _run_category_clean(self, cat: dict, mode: DeleteMode = DEFAULT_DELETE_MODE) cleared += 1 else: log(f" Could not clear {logname}: {result.stderr.strip()}") - except Exception as e: + except (OSError, subprocess.SubprocessError) as e: + # wevtutil spawn failure or timeout per-log. log(f" Error clearing {logname}: {e}") log(f" {cleared} event log(s) cleared") return 0 @@ -1216,7 +1273,8 @@ def _run_category_clean(self, cat: dict, mode: DeleteMode = DEFAULT_DELETE_MODE) ctypes.windll.user32.EmptyClipboard() ctypes.windll.user32.CloseClipboard() log(" Clipboard cleared") - except Exception as e: + except (AttributeError, OSError) as e: + # AttributeError on non-Windows; OSError on user32 call failure. log(f" {e}") return 0 @@ -1235,7 +1293,8 @@ def _run_category_clean(self, cat: dict, mode: DeleteMode = DEFAULT_DELETE_MODE) line = line.strip() if line: log(f" {line}") - except Exception as e: + except (OSError, subprocess.SubprocessError) as e: + # ipconfig spawn failure or timeout. log(f" {e}") return 0 @@ -1255,11 +1314,14 @@ def _start_ram_update_loop(self): text=f"RAM: {used:.1f} GB / {tot:.1f} GB ({pct:.0f}% used)", text_color=color, ) - except Exception: + except (OSError, AttributeError, tk.TclError): + # OSError: psutil read failure. AttributeError: label not attached. + # TclError: widget destroyed during shutdown. pass try: self.after(2000, self._start_ram_update_loop) - except Exception: + except tk.TclError: + # Tk root gone — stop rescheduling. pass def _start_ram_optimize(self): @@ -1291,7 +1353,9 @@ def _ram_optimize_worker(self): ) kernel32.CloseHandle(handle) trimmed += 1 - except Exception: + except (psutil.Error, OSError, AttributeError): + # psutil.Error: process vanished. OSError: kernel32 call rejected. + # AttributeError: windll missing on non-Windows. 
failed += 1 time.sleep(0.8) @@ -1353,7 +1417,8 @@ def _standby_purge_worker(self): self._log(" Standby memory purged via NtSetSystemInformation") else: self._log(f" NtSetSystemInformation returned status: 0x{status & 0xFFFFFFFF:08X}") - except Exception as e: + except (AttributeError, OSError) as e: + # AttributeError on non-Windows; OSError on ntdll syscall failure. self._log(f" Error: {e}") time.sleep(0.5) @@ -1401,7 +1466,8 @@ def _restart_explorer_worker(self): if proc.info["name"] and proc.info["name"].lower() == "explorer.exe": try: explorer_mem += proc.info["memory_info"].rss - except Exception: + except (psutil.Error, AttributeError): + # Process vanished mid-iter, or memory_info unavailable. pass if explorer_mem: @@ -1447,7 +1513,8 @@ def _restart_explorer_worker(self): if proc.info["name"] and proc.info["name"].lower() == "explorer.exe": try: new_mem += proc.info["memory_info"].rss - except Exception: + except (psutil.Error, AttributeError): + # Process vanished mid-iter, or memory_info unavailable. pass if explorer_mem and new_mem: @@ -1457,7 +1524,12 @@ def _restart_explorer_worker(self): self._log(f" RAM recovered: {format_size(saved)}") self._log(" Thumbnail and icon caches are now unlocked") - except Exception as e: + except Exception as e: # noqa: BLE001 + # Thread-worker boundary: taskkill/explorer.exe spawn + psutil iter + # raise a wide surface (OSError, subprocess.SubprocessError, + # psutil.Error, AttributeError). Catch-all keeps the restart UX + # coherent and logs a full traceback for diagnosis. + log.exception("restart_explorer_worker failed") self._log(f" Error: {e}") self.after(0, lambda: self._explorer_btn.configure( @@ -1470,7 +1542,8 @@ def force_stop(self): self._is_ram_opt = False try: self.parent.destroy() - except Exception: + except tk.TclError: + # Already destroyed. pass