#!/bin/bash
# MyAI privileged helper - called via pkexec by the Flask backend.
# All output (stdout + stderr) is streamed to the UI via NDJSON.
#
# We bypass Ollama's official install.sh entirely. install.sh proactively
# installs CUDA/proprietary NVIDIA drivers when nvidia-smi isn't already
# working, which clobbers Linux Lite's existing driver state (whether
# nouveau or Lite-Driver-Manager-installed proprietary) and breaks X on
# next boot. Instead we ask the server to download the binary tarball
# and just extract it into /usr ourselves. Ollama detects libnvidia-ml
# at runtime regardless of who installed the drivers — if Lite Driver
# Manager set them up, GPU acceleration "just works".
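#
# Example invocations (the helper's install path is illustrative; the
# backend resolves the real location at runtime):
#
#   pkexec /usr/libexec/myai-helper install-ollama /tmp/myai-ollama-download.XXXX.tgz
#   pkexec /usr/libexec/myai-helper start-ollama
#
# The backend wraps each output line in an NDJSON event along the lines
# of (exact schema is the backend's; shown only for orientation):
#
#   {"type": "log", "line": ">>> Extracting Ollama into /usr"}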

export LANG=C.UTF-8
export LC_ALL=C.UTF-8
export PATH=/usr/sbin:/usr/bin:/sbin:/bin

# Fail a pipeline when any stage fails; without this, the zstd|tar
# extraction below would report success even if zstd died mid-stream.
set -o pipefail

OLLAMA_API=http://127.0.0.1:11434/api/tags
MYAI_UNIT=myai-ollama.service
OLLAMA_HOME=/usr/share/ollama
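
# For reference, a minimal sketch of what $MYAI_UNIT is assumed to look
# like (the real unit ships with MyAI; every value below is
# illustrative, not authoritative):
#
#   [Unit]
#   Description=MyAI Ollama server
#   After=network-online.target
#
#   [Service]
#   ExecStart=/usr/bin/ollama serve
#   User=ollama
#   Group=ollama
#   Environment=OLLAMA_HOST=127.0.0.1:11434
#   Restart=on-failure
#
#   [Install]
#   WantedBy=multi-user.target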

action="${1:-}"

# /api/tags lists locally-installed models; it answers as soon as the
# server is up (even before any model is pulled), so it doubles as a
# cheap liveness probe.
api_responds() {
    curl -fsS -o /dev/null --max-time 2 "$OLLAMA_API" 2>/dev/null
}

wait_for_api() {
    local max=${1:-15}
    local i=0
    while [ "$i" -lt "$max" ]; do
        api_responds && return 0
        sleep 1
        i=$((i + 1))
    done
    return 1
}

ensure_ollama_user() {
    if ! id -u ollama >/dev/null 2>&1; then
        echo ">>> Creating ollama system user"
        if ! getent group ollama >/dev/null 2>&1; then
            groupadd --system ollama || { echo "!!! groupadd failed"; return 1; }
        fi
        useradd --system \
                --shell /usr/sbin/nologin \
                --home-dir "$OLLAMA_HOME" \
                --no-create-home \
                --gid ollama \
                ollama \
            || { echo "!!! useradd failed"; return 1; }
        echo ">>> ollama user created"
    fi
    # Ensure the home dir exists and is owned correctly even when the
    # user already existed (e.g. left behind by upstream install.sh).
    mkdir -p "$OLLAMA_HOME"
    chown ollama:ollama "$OLLAMA_HOME"
    chmod 755 "$OLLAMA_HOME"
}

stop_conflicting() {
    # Disable upstream's ollama.service if it exists (a previous install.sh
    # may have left one), so it doesn't fight ours for port 11434.
    if systemctl list-unit-files ollama.service >/dev/null 2>&1; then
        systemctl disable --now ollama.service >/dev/null 2>&1 || true
    fi
    # Also catch any ollama processes running outside systemd (e.g. a
    # user-launched "ollama serve").
    pkill -x ollama 2>/dev/null || true
    sleep 1
}

bring_up_myai_unit() {
    systemctl daemon-reload >/dev/null 2>&1 || true
    systemctl reset-failed "$MYAI_UNIT" 2>/dev/null || true
    echo ">>> Enabling and starting $MYAI_UNIT"
    if ! systemctl enable --now "$MYAI_UNIT" 2>&1; then
        echo "!!! systemctl enable --now failed"
        systemctl --no-pager --lines=20 status "$MYAI_UNIT" 2>&1 || true
        return 1
    fi
    if wait_for_api 15; then
        echo ">>> Ollama API is responding on port 11434"
        return 0
    fi
    echo "!!! Ollama API did not respond"
    systemctl --no-pager --lines=20 status "$MYAI_UNIT" 2>&1 || true
    return 1
}
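
# If the unit starts but the API never answers, the quickest diagnostic
# is usually the unit's journal:
#
#   journalctl -u myai-ollama.service --no-pager -n 50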

extract_tarball() {
    local tarball="$1"
    case "$tarball" in
        *.tar.zst|*.tzst)
            if ! command -v zstd >/dev/null 2>&1; then
                echo "!!! zstd is required to extract this archive"
                return 1
            fi
            zstd -d --stdout "$tarball" | tar -x -C /usr
            ;;
        *.tgz|*.tar.gz)
            tar -xzf "$tarball" -C /usr
            ;;
        *)
            echo "!!! Unknown tarball format: $tarball"
            return 1
            ;;
    esac
}
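
# The upstream Linux tarballs (ollama-linux-amd64.tgz and friends)
# unpack relative paths like
#
#   bin/ollama       -> /usr/bin/ollama
#   lib/ollama/...   -> /usr/lib/ollama/...   (runners, GPU libs)
#
# which is why a bare "tar -x -C /usr" lands everything in place. That
# layout is current as of recent releases; re-check it if upstream ever
# repackages.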

install_ollama() {
    # The server downloads the tarball (unprivileged), then invokes us
    # with the path. Restrict accepted paths so a compromised caller
    # can't ask us to untar an arbitrary archive into /usr.
    local tarball="${2:-}"
    # Canonicalise before checking the prefix, so a path such as
    # /tmp/myai-ollama-download.x/../../somewhere/evil.tgz can't slip
    # past the pattern match below.
    tarball=$(realpath -e -- "$tarball" 2>/dev/null) || {
        echo "!!! Tarball not found: ${2:-}"
        return 1
    }
    case "$tarball" in
        /tmp/myai-ollama-download.*) ;;
        *) echo "!!! Tarball path must be /tmp/myai-ollama-download.*"; return 1 ;;
    esac
    if [ ! -f "$tarball" ]; then
        echo "!!! Not a regular file: $tarball"
        return 1
    fi

    echo ">>> Preparing ollama system user"
    ensure_ollama_user || return 1

    stop_conflicting

    # Remove any prior runtime layout so we don't leave orphaned files
    # from a different Ollama version. The binary at /usr/bin/ollama
    # gets overwritten by tar; /usr/lib/ollama may contain version-
    # specific helper libs that don't migrate cleanly.
    if [ -d /usr/lib/ollama ]; then
        echo ">>> Cleaning up previous /usr/lib/ollama"
        rm -rf /usr/lib/ollama
    fi

    echo ">>> Extracting Ollama into /usr"
    if ! extract_tarball "$tarball"; then
        echo "!!! Extraction failed"
        return 1
    fi
    rm -f "$tarball"

    if ! command -v ollama >/dev/null 2>&1; then
        echo "!!! ollama binary not found after extract"
        return 1
    fi
    echo ">>> Verified $(ollama --version 2>&1 | head -1)"

    bring_up_myai_unit
}

start_ollama() {
    ensure_ollama_user || return 1
    if ! command -v ollama >/dev/null 2>&1; then
        echo "!!! ollama binary not installed — run install first"
        return 1
    fi
    stop_conflicting
    bring_up_myai_unit
}

case "$action" in
    install-ollama)
        # "$@" still carries the action word in $1, which is why
        # install_ollama reads the tarball path from $2.
        install_ollama "$@"
        ;;
    start-ollama)
        start_ollama
        ;;
    *)
        echo "Unknown action: ${action:-<none>}" >&2
        echo "Usage: $0 install-ollama <tarball> | start-ollama" >&2
        exit 1
        ;;
esac
