diff --git a/Dockerfile b/Dockerfile index 34151a77..60a4f642 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,21 +4,28 @@ FROM python:3.11-slim-bullseye # Set the working directory WORKDIR /app -# Install Node.js based on platform -RUN apt-get update && apt-get install -y curl build-essential libffi-dev redis-server ca-certificates gnupg \ +# ------------------------------------------------------------------ +# 1. Install system packages and Node.js +# ------------------------------------------------------------------ +RUN apt-get update && apt-get install -y \ + curl build-essential libffi-dev redis-server ca-certificates gnupg \ && mkdir -p /etc/apt/keyrings \ && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ && NODE_MAJOR=18 \ - && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \ + && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" \ + | tee /etc/apt/sources.list.d/nodesource.list \ && apt-get update \ && apt-get install -y nodejs -# Install Nim +# ------------------------------------------------------------------ +# 2. Install Nim from source +# ------------------------------------------------------------------ RUN apt-get update && \ - apt-get install -y curl xz-utils gcc openssl ca-certificates git # && + apt-get install -y curl xz-utils gcc openssl ca-certificates git RUN mkdir -p /opt/nim && \ - curl -L https://nim-lang.org/download/nim-2.2.0.tar.xz | tar -xJf - -C /opt/nim --strip-components=1 && \ + curl -L https://nim-lang.org/download/nim-2.2.2.tar.xz \ + | tar -xJf - -C /opt/nim --strip-components=1 && \ cd /opt/nim && \ sh build.sh && \ bin/nim c koch && \ @@ -27,26 +34,71 @@ RUN mkdir -p /opt/nim && \ ENV PATH="/opt/nim/bin:${PATH}" -RUN nim --version \ - nimble --version +RUN nim --version && nimble --version -# Copy the requirements file and install using pip +# ------------------------------------------------------------------ +# 3. Add BOTH arm64 + armhf architectures & install cross toolchains +# ------------------------------------------------------------------ +RUN dpkg --add-architecture arm64 \ + && dpkg --add-architecture armhf \ + && apt-get update \ + && apt-get install -y \ + crossbuild-essential-arm64 \ + crossbuild-essential-armhf \ + libc6-dev:arm64 \ + libc6-dev:armhf \ + libevdev-dev:arm64 \ + libevdev-dev:armhf \ + pkg-config \ + wget + +# ------------------------------------------------------------------ +# 4. 
Build + install liblgpio for ARM64 (aarch64) to DESTDIR +# so we don't overwrite the default /usr/local/lib +# ------------------------------------------------------------------ +# Build + install liblgpio for ARM64 +RUN mkdir -p /tmp/lgpio-arm64 && cd /tmp/lgpio-arm64 && \ + wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && \ + tar -xzf v0.2.2.tar.gz && cd lg-0.2.2 && \ + make clean && make CROSS_PREFIX=aarch64-linux-gnu- && \ + make DESTDIR=/tmp/install-arm64 install && \ + # Remove any /usr/local/lib stuff (if placed) and move final libs + rm -f /usr/local/lib/liblg*.so* && \ + mkdir -p /usr/lib/aarch64-linux-gnu /usr/include/aarch64-linux-gnu && \ + cp /tmp/install-arm64/usr/local/lib/liblg*.so* /usr/lib/aarch64-linux-gnu/ && \ + cp /tmp/install-arm64/usr/local/include/lgpio.h /usr/include/aarch64-linux-gnu/ && \ + ldconfig && cd / && rm -rf /tmp/lgpio-arm64 /tmp/install-arm64 + +# ------------------------------------------------------------------ +# 5. Build + install liblgpio for ARMHF (arm-linux-gnueabihf) to DESTDIR +# ------------------------------------------------------------------ + RUN mkdir -p /tmp/lgpio-armhf && cd /tmp/lgpio-armhf && \ + wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && \ + tar -xzf v0.2.2.tar.gz && cd lg-0.2.2 && \ + make clean && make CROSS_PREFIX=arm-linux-gnueabihf- && \ + make DESTDIR=/tmp/install-armhf install && \ + rm -f /usr/local/lib/liblg*.so* && \ + mkdir -p /usr/lib/arm-linux-gnueabihf /usr/include/arm-linux-gnueabihf && \ + cp /tmp/install-armhf/usr/local/lib/liblg*.so* /usr/lib/arm-linux-gnueabihf/ && \ + cp /tmp/install-armhf/usr/local/include/lgpio.h /usr/include/arm-linux-gnueabihf/ && \ + ldconfig && cd / && rm -rf /tmp/lgpio-armhf /tmp/install-armhf + +# ------------------------------------------------------------------ +# 6. Install Python dependencies +# ------------------------------------------------------------------ WORKDIR /app/backend COPY backend/requirements.txt . RUN pip3 install --upgrade uv \ && uv venv \ && uv pip install --no-cache-dir -r requirements.txt -# Change the working directory for npm install +# ------------------------------------------------------------------ +# 7. Install and build frontend +# ------------------------------------------------------------------ WORKDIR /tmp/frontend - -# Copy the npm configuration files COPY frontend/package.json frontend/package-lock.json /tmp/frontend/ - -# Install npm packages RUN npm install -# Copy frontend source files and run build COPY frontend/ ./ COPY version.json ../ RUN npm run build @@ -54,16 +106,20 @@ RUN npm run build # Delete all files except the dist and schema folders RUN find . -maxdepth 1 ! -name 'dist' ! -name 'schema' ! -name '.' ! -name '..' -exec rm -rf {} \; -# Cleanup node installations and build tools -RUN apt-get remove -y nodejs curl build-essential libffi-dev ca-certificates gnupg \ +# ------------------------------------------------------------------ +# 8. Clean up unneeded Nodejs & other packages +# (Keeping crossbuild-essential-* and build-essential) +# ------------------------------------------------------------------ +RUN apt-get remove -y nodejs curl libffi-dev ca-certificates gnupg \ && apt-get autoremove -y \ && apt-get clean \ && rm -rf /app/frontend/node_modules \ && rm -rf /var/lib/apt/lists/* /root/.npm -# Change back to the main directory +# ------------------------------------------------------------------ +# 9. 
Prepare Nim environment +# ------------------------------------------------------------------ WORKDIR /app/frameos - COPY frameos/frameos.nimble ./ COPY frameos/nimble.lock ./ COPY frameos/nim.cfg ./ @@ -71,12 +127,11 @@ COPY frameos/nim.cfg ./ # Cache nimble deps for when deploying on frame RUN nimble install -d -y && nimble setup -# Change back to the main directory +# ------------------------------------------------------------------ +# 10. Move final built frontend into /app +# ------------------------------------------------------------------ WORKDIR /app - -# Copy the rest of the application to the container COPY . . - RUN rm -rf /app/frontend && mv /tmp/frontend /app/ EXPOSE 8989 diff --git a/backend/app/tasks/deploy_frame.py b/backend/app/tasks/deploy_frame.py index 599d0f7d..5b4abd92 100644 --- a/backend/app/tasks/deploy_frame.py +++ b/backend/app/tasks/deploy_frame.py @@ -1,6 +1,5 @@ from datetime import datetime, timezone import json -import hashlib import os import random import re @@ -30,10 +29,21 @@ async def deploy_frame(id: int, redis: Redis): + """Queue a job to deploy a frame by ID.""" await redis.enqueue_job("deploy_frame", id=id) async def deploy_frame_task(ctx: dict[str, Any], id: int): + """ + Main deployment logic for building, packaging, and deploying + the Nim (FrameOS) application onto a target device via SSH. + Changes made: + 1) If cross-compiling, only the final `frameos` binary (and vendor if needed) + is uploaded, not the full C source code. + 2) Download minimal `libevdev.so.*` and `liblgpio.so.*` plus relevant headers + from the Pi to local sysroot so we can link the same version that the Pi has. + 3) If apt fails for `liblgpio-dev`, compile from source on the Pi. + """ db: Session = ctx['db'] redis: Redis = ctx['redis'] @@ -49,11 +59,10 @@ async def deploy_frame_task(ctx: dict[str, Any], id: int): if frame.status == 'deploying': raise Exception("Already deploying. Request again to force redeploy.") - frame_dict = frame.to_dict() # persisted as frame.last_successful_deploy if successful - if "last_successful_deploy" in frame_dict: - del frame_dict["last_successful_deploy"] - if "last_successful_deploy_at" in frame_dict: - del frame_dict["last_successful_deploy_at"] + # We do not want to persist these fields if successful. + frame_dict = frame.to_dict() + frame_dict.pop("last_successful_deploy", None) + frame_dict.pop("last_successful_deploy_at", None) frame.status = 'deploying' await update_frame(db, redis, frame) @@ -64,280 +73,316 @@ async def deploy_frame_task(ctx: dict[str, Any], id: int): nim_path = find_nim_v2() ssh = await get_ssh_connection(db, redis, frame) - async def install_if_necessary(pkg: str, raise_on_error=True) -> int: - return await exec_command( - db, redis, frame, ssh, - f"dpkg -l | grep -q \"^ii {pkg}\" || sudo apt-get install -y {pkg}", - raise_on_error=raise_on_error + # 1. Determine the remote CPU architecture + await log(db, redis, id, "stdout", "- Getting target architecture") + uname_output: list[str] = [] + await exec_command(db, redis, frame, ssh, "uname -m", uname_output) + arch = "".join(uname_output).strip() + cpu = get_target_cpu(arch) + + # For ARM Pi: pass extra march flags for ARMv6 or ARMv7 + pass_c_l_flags = "" + if arch == "armv6l": + pass_c_l_flags = "-march=armv6 -mfpu=vfp -mfloat-abi=hard -mtune=arm1176jzf-s -marm" + elif arch == "armv7l": + pass_c_l_flags = "-march=armv7-a -mfloat-abi=hard -mfpu=vfpv3 -mtune=cortex-a7 -marm" + + # 2. 
We will install needed dependencies on the Pi: + # build-essential is only needed if we end up *not* cross-compiling. + # But let's ensure the Pi can also run code that uses evdev, lgpio, etc. + # We'll also handle the possibility that `liblgpio-dev` is missing in apt. + await log(db, redis, id, "stdout", "- Installing required packages on the Pi (if available)") + # We'll do a helper function for apt installs: + pkgs = ["ntp", "libevdev-dev"] + # We do NOT add "build-essential" here by default. We'll do it conditionally if we need on-device build. + for pkg in pkgs: + await install_if_necessary(db, redis, frame, ssh, pkg, raise_on_error=False) + + # 2B. Try installing `liblgpio-dev`, if not found -> compile from source + rc = await install_if_necessary(db, redis, frame, ssh, "liblgpio-dev", raise_on_error=False) + if rc != 0: + # We'll do the same approach we used for waveshare: + await log(db, redis, id, "stdout", "--> Could not find liblgpio-dev. Installing from source.") + command = ( + "if [ ! -f /usr/local/include/lgpio.h ]; then " + " rm -rf /tmp/lgpio-install && " + " mkdir -p /tmp/lgpio-install && " + " cd /tmp/lgpio-install && " + " wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && " + " tar -xzf v0.2.2.tar.gz && " + " cd lg-0.2.2 && " + " make && " + " sudo make install && " + " sudo rm -rf /tmp/lgpio-install; " + "fi" ) + await exec_command(db, redis, frame, ssh, command) + + # 2C. Scenes might require apt packages + all_deps = get_apt_dependencies_from_scenes(db, redis, frame) + for dep in all_deps: + await install_if_necessary(db, redis, frame, ssh, dep) + + # 3. Check if we can cross-compile. Otherwise we’ll compile on the device. + cross_compiler = get_cross_compiler_for_cpu(cpu) + do_cross_compile = False + if cross_compiler: + rc, _, _ = await exec_local_command(db, redis, frame, f"{cross_compiler} --version", generate_log=False) + if rc == 0: + do_cross_compile = True + + # 4. If do_cross_compile, fetch minimal libs+headers from Pi for local linking + # (we only need libevdev & liblgpio plus their includes). + local_sysroot_dir = None + if do_cross_compile: + await log(db, redis, id, "stdout", f"- Found cross-compiler '{cross_compiler}' for {cpu}") + # TODO: delete this later? preserve it? + local_sysroot_dir = os.path.join(tempfile.gettempdir(), f"sysroot_{frame.id}_{build_id}") + # local_sysroot_dir = os.path.abspath(f"./sysroot_{frame.id}_{build_id}") + if not os.path.exists(local_sysroot_dir): + os.makedirs(local_sysroot_dir, exist_ok=True) + + # 4A. Download the relevant .so libs from the Pi + # We'll store them in e.g. 
sysroot/usr/lib/arm-linux-gnueabihf
+        remote_libs_tar = f"/tmp/libs_{build_id}.tar.gz"
+        cmd = (
+            f"sudo tar -czf {remote_libs_tar} "
+            f"/usr/lib/arm-linux-gnueabihf/libarmmem* "
+            f"/usr/lib/arm-linux-gnueabihf/libm.so* "
+            f"/usr/lib/arm-linux-gnueabihf/libdl.so* "
+            f"/usr/lib/arm-linux-gnueabihf/libpthread.so* "
+            f"/usr/lib/arm-linux-gnueabihf/libc.so* "
+            f"/usr/lib/arm-linux-gnueabihf/liblgpio.so* "
+            "2>/dev/null || true" # just in case some file is missing
+        )
+        await exec_command(db, redis, frame, ssh, cmd)
+        local_libs_tar = os.path.join(local_sysroot_dir, "libs.tar.gz")
+        await asyncssh.scp((ssh, remote_libs_tar), local_libs_tar)
+        # Clean up remote tar
+        await exec_command(db, redis, frame, ssh, f"sudo rm -f {remote_libs_tar}")
+
+        # Extract to sysroot/usr/lib/arm-linux-gnueabihf
+        sysroot_lib_dir = os.path.join(local_sysroot_dir, "usr", "lib", "arm-linux-gnueabihf")
+        os.makedirs(sysroot_lib_dir, exist_ok=True)
+        shutil.unpack_archive(local_libs_tar, local_sysroot_dir)
+        os.remove(local_libs_tar)
+
+        # 4B. Download relevant includes: often /usr/include/libevdev-1.0 & the lgpio.h
+        remote_inc_tar = f"/tmp/includes_{build_id}.tar.gz"
+        cmd = (
+            f"sudo tar -czf {remote_inc_tar} "
+            f"/usr/include/libevdev-1.0 "
+            f"/usr/include/arm-linux-gnueabihf/lgpio.h "
+            f"/usr/local/include/lgpio.h "
+            "2>/dev/null || true"
+        )
+        await exec_command(db, redis, frame, ssh, cmd)
+        local_inc_tar = os.path.join(local_sysroot_dir, "includes.tar.gz")
+        await asyncssh.scp((ssh, remote_inc_tar), local_inc_tar)
+        await exec_command(db, redis, frame, ssh, f"sudo rm -f {remote_inc_tar}")
+        # Extract them into local sysroot
+        shutil.unpack_archive(local_inc_tar, local_sysroot_dir)
+        os.remove(local_inc_tar)
+
+    # 5. Possibly handle low memory Pi if we are building on-device
+    total_memory = 0
+    try:
+        mem_output: list[str] = []
+        await exec_command(db, redis, frame, ssh, "free -m", mem_output)
+        total_memory = int(mem_output[1].split()[1])
+    except Exception as e:
+        await log(db, redis, id, "stderr", str(e))
+    low_memory = (total_memory < 512)
 
-    with tempfile.TemporaryDirectory() as temp_dir:
-        await log(db, redis, id, "stdout", "- Getting target architecture")
-        uname_output: list[str] = []
-        await exec_command(db, redis, frame, ssh, "uname -m", uname_output)
-        arch = "".join(uname_output).strip()
-        if arch in ("aarch64", "arm64"):
-            cpu = "arm64"
-        elif arch in ("armv6l", "armv7l"):
-            cpu = "arm"
-        elif arch == "i386":
-            cpu = "i386"
-        else:
-            cpu = "amd64"
-
-        total_memory = 0
-        try:
-            mem_output: list[str] = []
-            await exec_command(db, redis, frame, ssh, "free -m", mem_output)
-            total_memory = int(mem_output[1].split()[1]) # line 1 => "Mem: ... 991 ..."
-        except Exception as e:
-            await log(db, redis, id, "stderr", str(e))
-        low_memory = total_memory < 512
+    if not do_cross_compile:
+        # We may need to compile on the Pi
+        await install_if_necessary(db, redis, frame, ssh, "build-essential")
+        if low_memory:
+            await log(db, redis, id, "stdout", "- Low memory device, stopping FrameOS for compilation")
+            await exec_command(db, redis, frame, ssh, "sudo service frameos stop", raise_on_error=False)
 
-        drivers = drivers_for_frame(frame)
+    # 6. Generate Nim -> C code locally and optionally cross-compile
+    drivers = drivers_for_frame(frame)
+    with tempfile.TemporaryDirectory() as temp_dir:
+        await log(db, redis, id, "stdout", "- Creating local Nim build (C sources)")
 
-        # 1. 
Create build tar.gz locally - await log(db, redis, id, "stdout", "- Copying build folders") build_dir, source_dir = create_build_folders(temp_dir, build_id) - await log(db, redis, id, "stdout", "- Applying local modifications") await make_local_modifications(db, redis, frame, source_dir) - await log(db, redis, id, "stdout", "- Creating build archive") - archive_path = await create_local_build_archive( - db, redis, frame, build_dir, build_id, nim_path, source_dir, temp_dir, cpu - ) - if low_memory: - await log(db, redis, id, "stdout", "- Low memory device, stopping FrameOS for compilation") - await exec_command(db, redis, frame, ssh, "sudo service frameos stop", raise_on_error=False) + # Just produce C code + Makefile + c_archive_path = await create_local_build_archive( + db, redis, frame, + build_dir, build_id, nim_path, source_dir, temp_dir, cpu, + pass_c_l_flags, + do_cross_compile + ) - # 2. Remote steps - await install_if_necessary("ntp") - await install_if_necessary("build-essential") + frameos_binary_path = os.path.join(build_dir, "frameos") - if drivers.get("evdev"): - await install_if_necessary("libevdev-dev") + if do_cross_compile and local_sysroot_dir: + # 6A. Actually compile locally with cross_compiler + await log(db, redis, id, "stdout", "- Cross compiling `frameos` with the Pi's libraries + headers") - if drivers.get("waveshare") or drivers.get("gpioButton"): - check_lgpio = await exec_command( + # Provide CFLAGS with path to local sysroot + sysroot_flags = ( + f"--sysroot={local_sysroot_dir} " + f"-I{local_sysroot_dir}/usr/include " + f"-L{local_sysroot_dir}/usr/lib/arm-linux-gnueabihf " + ) + # We also apply our pass_c_l_flags (-march=...) + # plus the libraries Nim might link: -levdev -llgpio + make_cmd = ( + f"cd {build_dir} && make clean && " + f"make -j$(nproc) CC={cross_compiler} " + f"\"SYSROOT={sysroot_flags}\" " + ) + status, _, _ = await exec_local_command(db, redis, frame, make_cmd) + if status != 0: + raise Exception("Cross-compilation with sysroot failed.") + else: + # 6B. On-device compile approach + await exec_command(db, redis, frame, ssh, "mkdir -p /srv/frameos/build/ /srv/frameos/logs/") + await log(db, redis, id, "stdout", f"> add /srv/frameos/build/build_{build_id}.tar.gz") + + # Upload the entire C code tar to compile on Pi + await asyncssh.scp( + c_archive_path, + (ssh, f"/srv/frameos/build/build_{build_id}.tar.gz"), + recurse=False + ) + await exec_command( db, redis, frame, ssh, - '[[ -f "/usr/local/include/lgpio.h" || -f "/usr/include/lgpio.h" ]] && exit 0 || exit 1', - raise_on_error=False + f"cd /srv/frameos/build && tar -xzf build_{build_id}.tar.gz && rm build_{build_id}.tar.gz" ) - if check_lgpio != 0: - # Try installing liblgpio-dev - if await install_if_necessary("liblgpio-dev", raise_on_error=False) != 0: - await log(db, redis, id, "stdout", - "--> Could not find liblgpio-dev. Installing from source.") - command = ( - "if [ ! 
-f /usr/local/include/lgpio.h ]; then " - " rm -rf /tmp/lgpio-install && " - " mkdir -p /tmp/lgpio-install && " - " cd /tmp/lgpio-install && " - " wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && " - " tar -xzf v0.2.2.tar.gz && " - " cd lg-0.2.2 && " - " make && " - " sudo make install && " - " sudo rm -rf /tmp/lgpio-install; " - "fi" - ) - await exec_command(db, redis, frame, ssh, command) - - # Any app dependencies - all_deps = set() - for scene in frame.scenes: - try: - for node in scene.get('nodes', []): - try: - config: Optional[dict[str, str]] = None - if node.get('type') == 'app': - app = node.get('data', {}).get('keyword') - if app: - json_config = get_one_app_sources(app).get('config.json') - if json_config: - config = json.loads(json_config) - if node.get('type') == 'source': - json_config = node.get('sources', {}).get('config.json') - if json_config: - config = json.loads(json_config) - if config: - if config.get('apt'): - for dep in config['apt']: - all_deps.add(dep) - except Exception as e: - await log(db, redis, id, "stderr", f"Error parsing node: {e}") - except Exception as e: - await log(db, redis, id, "stderr", f"Error parsing scene: {e}") - for dep in all_deps: - await install_if_necessary(dep) - - # Ensure /srv/frameos - await exec_command(db, redis, frame, ssh, - "if [ ! -d /srv/frameos/ ]; then " - " sudo mkdir -p /srv/frameos/ && sudo chown $(whoami):$(whoami) /srv/frameos/; " - "fi") - - await exec_command(db, redis, frame, ssh, "mkdir -p /srv/frameos/build/ /srv/frameos/logs/") - await log(db, redis, id, "stdout", f"> add /srv/frameos/build/build_{build_id}.tar.gz") - - # 3. Upload the local tarball - await asyncssh.scp( - archive_path, - (ssh, f"/srv/frameos/build/build_{build_id}.tar.gz"), - recurse=False - ) + compile_cmd = ( + f"cd /srv/frameos/build/build_{build_id} && " + "PARALLEL_MEM=$(awk '/MemTotal/{printf \"%.0f\\n\", $2/1024/250}' /proc/meminfo) && " + "PARALLEL=$(($PARALLEL_MEM < $(nproc) ? $PARALLEL_MEM : $(nproc))) && " + "make -j$PARALLEL" + ) + await exec_command(db, redis, frame, ssh, compile_cmd) - # Unpack & compile on device - await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/build && tar -xzf build_{build_id}.tar.gz && rm build_{build_id}.tar.gz") - await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/build/build_{build_id} && " - "PARALLEL_MEM=$(awk '/MemTotal/{printf \"%.0f\\n\", $2/1024/250}' /proc/meminfo) && " - "PARALLEL=$(($PARALLEL_MEM < $(nproc) ? $PARALLEL_MEM : $(nproc))) && " - "make -j$PARALLEL") + # 7. Upload final `frameos` executable (if cross-compiled), plus vendor if needed + release_path = f"/srv/frameos/releases/release_{build_id}" + if do_cross_compile: + # We skip uploading the entire build_{build_id} folder. Just upload the `frameos`. + await exec_command(db, redis, frame, ssh, + f"mkdir -p {release_path}") + # TODO: compress + await asyncssh.scp( + frameos_binary_path, + (ssh, f"{release_path}/frameos"), + recurse=False + ) + # If there's vendor code (e.g. inky) we still need to copy that to the Pi, + # because e.g. the Python environment is needed at runtime. 
+ vendor_tar = None + if requires_vendor_upload(drivers): + vendor_tar = os.path.join(temp_dir, f"vendor_{build_id}.tar.gz") + vendor_folder_temp = os.path.join(temp_dir, "vendor") + os.makedirs(vendor_folder_temp, exist_ok=True) + copy_vendor_folders(drivers, vendor_folder_temp) + shutil.make_archive( + base_name=os.path.join(temp_dir, f"vendor_{build_id}"), + format='gztar', + root_dir=temp_dir, + base_dir="vendor" + ) + await exec_command(db, redis, frame, ssh, "mkdir -p /srv/frameos/build/vendor_temp") + await asyncssh.scp(vendor_tar, + (ssh, f"/srv/frameos/build/vendor_temp/vendor_{build_id}.tar.gz"), + recurse=False) + await exec_command( + db, redis, frame, ssh, + f"cd /srv/frameos/build/vendor_temp && " + f"tar -xzf vendor_{build_id}.tar.gz && rm vendor_{build_id}.tar.gz" + ) + # Then we can move that vendor code to the new release + await exec_command( + db, redis, frame, ssh, + "mkdir -p /srv/frameos/vendor && " + "cp -r /srv/frameos/build/vendor_temp/vendor/* /srv/frameos/vendor/" + ) + await exec_command(db, redis, frame, ssh, "rm -rf /srv/frameos/build/vendor_temp") - await exec_command(db, redis, frame, ssh, f"mkdir -p /srv/frameos/releases/release_{build_id}") - await exec_command(db, redis, frame, ssh, - f"cp /srv/frameos/build/build_{build_id}/frameos " - f"/srv/frameos/releases/release_{build_id}/frameos") + else: + # We compiled on the Pi. The final binary is at /srv/frameos/build/build_{build_id}/frameos + await exec_command(db, redis, frame, ssh, f"mkdir -p {release_path}") + await exec_command( + db, redis, frame, ssh, + f"cp /srv/frameos/build/build_{build_id}/frameos {release_path}/frameos" + ) - # 4. Upload frame.json using a TEMP FILE approach + # 8. Upload frame.json frame_json_data = (json.dumps(get_frame_json(db, frame), indent=4) + "\n").encode('utf-8') with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmpf: local_json_path = tmpf.name tmpf.write(frame_json_data) - await asyncssh.scp( - local_json_path, (ssh, f"/srv/frameos/releases/release_{build_id}/frame.json"), - recurse=False - ) - os.remove(local_json_path) # remove local temp file - await log(db, redis, id, "stdout", f"> add /srv/frameos/releases/release_{build_id}/frame.json") - # Driver-specific vendor steps - if inkyPython := drivers.get("inkyPython"): - await exec_command(db, redis, frame, ssh, - f"mkdir -p /srv/frameos/vendor && " - f"cp -r /srv/frameos/build/build_{build_id}/vendor/inkyPython /srv/frameos/vendor/") - await install_if_necessary("python3-pip") - await install_if_necessary("python3-venv") - await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/vendor/{inkyPython.vendor_folder} && " - "([ ! -d env ] && python3 -m venv env || echo 'env exists') && " - "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || " - "(echo '> env/bin/pip3 install -r requirements.txt' && " - "env/bin/pip3 install -r requirements.txt && " - "sha256sum requirements.txt > requirements.txt.sha256sum))") - - if inkyHyperPixel2r := drivers.get("inkyHyperPixel2r"): - await exec_command(db, redis, frame, ssh, - f"mkdir -p /srv/frameos/vendor && " - f"cp -r /srv/frameos/build/build_{build_id}/vendor/inkyHyperPixel2r /srv/frameos/vendor/") - await install_if_necessary("python3-dev") - await install_if_necessary("python3-pip") - await install_if_necessary("python3-venv") - await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/vendor/{inkyHyperPixel2r.vendor_folder} && " - "([ ! 
-d env ] && python3 -m venv env || echo 'env exists') && " - "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || " - "(echo '> env/bin/pip3 install -r requirements.txt' && " - "env/bin/pip3 install -r requirements.txt && " - "sha256sum requirements.txt > requirements.txt.sha256sum))") - - # 5. Upload frameos.service with a TEMP FILE approach - with open("../frameos/frameos.service", "r") as f: - service_contents = f.read().replace("%I", frame.ssh_user) - service_data = service_contents.encode('utf-8') - with tempfile.NamedTemporaryFile(suffix=".service", delete=False) as tmpservice: - local_service_path = tmpservice.name - tmpservice.write(service_data) await asyncssh.scp( - local_service_path, - (ssh, f"/srv/frameos/releases/release_{build_id}/frameos.service"), + local_json_path, + (ssh, f"{release_path}/frame.json"), recurse=False ) - os.remove(local_service_path) + os.remove(local_json_path) + await log(db, redis, id, "stdout", f"> add {release_path}/frame.json") - await exec_command(db, redis, frame, ssh, - f"mkdir -p /srv/frameos/state && ln -s /srv/frameos/state " - f"/srv/frameos/releases/release_{build_id}/state") - await exec_command(db, redis, frame, ssh, - f"sudo cp /srv/frameos/releases/release_{build_id}/frameos.service " - f"/etc/systemd/system/frameos.service") - await exec_command(db, redis, frame, ssh, "sudo chown root:root /etc/systemd/system/frameos.service") - await exec_command(db, redis, frame, ssh, "sudo chmod 644 /etc/systemd/system/frameos.service") - - # 6. Link new release - await exec_command(db, redis, frame, ssh, - f"rm -rf /srv/frameos/current && " - f"ln -s /srv/frameos/releases/release_{build_id} /srv/frameos/current") + # 9. If inky vendor, set up Python venv on the Pi + await install_inky_vendors(db, redis, frame, ssh, build_id, drivers) - # Figure out the difference between /srv/assets and the local assets folder - await sync_assets(db, redis, frame, ssh) + # Clean old builds if we did on-device compile + if not do_cross_compile: + await exec_command(db, redis, frame, ssh, + "cd /srv/frameos/build && ls -dt1 build_* | tail -n +11 | xargs rm -rf") + await exec_command(db, redis, frame, ssh, + "cd /srv/frameos/build/cache && " + "find . -type f \\( -atime +0 -a -mtime +0 \\) | xargs rm -rf") - # Clean old builds - await exec_command(db, redis, frame, ssh, - "cd /srv/frameos/build && ls -dt1 build_* | tail -n +11 | xargs rm -rf") - await exec_command(db, redis, frame, ssh, - "cd /srv/frameos/build/cache && find . -type f \\( -atime +0 -a -mtime +0 \\) | xargs rm -rf") + # We also remove old releases, except the current symlink await exec_command(db, redis, frame, ssh, "cd /srv/frameos/releases && " "ls -dt1 release_* | grep -v \"$(basename $(readlink ../current))\" " "| tail -n +11 | xargs rm -rf") - boot_config = "/boot/config.txt" - if await exec_command(db, redis, frame, ssh, "test -f /boot/firmware/config.txt", raise_on_error=False) == 0: - boot_config = "/boot/firmware/config.txt" + # 10. 
systemd service, link new release + with open("../frameos/frameos.service", "r") as f: + service_contents = f.read().replace("%I", frame.ssh_user) + service_data = service_contents.encode('utf-8') + with tempfile.NamedTemporaryFile(suffix=".service", delete=False) as tmpservice: + local_service_path = tmpservice.name + tmpservice.write(service_data) + await asyncssh.scp( + local_service_path, + (ssh, f"{release_path}/frameos.service"), + recurse=False + ) + os.remove(local_service_path) - # Additional device config - if drivers.get("i2c"): - await exec_command(db, redis, frame, ssh, - 'grep -q "^dtparam=i2c_vc=on$" ' + boot_config + ' ' - '|| echo "dtparam=i2c_vc=on" | sudo tee -a ' + boot_config) - await exec_command(db, redis, frame, ssh, - 'command -v raspi-config > /dev/null && ' - 'sudo raspi-config nonint get_i2c | grep -q "1" && { ' - ' sudo raspi-config nonint do_i2c 0; echo "I2C enabled"; ' - '} || echo "I2C already enabled"') - - if drivers.get("spi"): - await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 0') - elif drivers.get("noSpi"): - await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 1') - - if low_memory: - await exec_command( - db, redis, frame, ssh, - "sudo systemctl mask apt-daily-upgrade && " - "sudo systemctl mask apt-daily && " - "sudo systemctl disable apt-daily.service apt-daily.timer apt-daily-upgrade.timer apt-daily-upgrade.service" - ) + await exec_command(db, redis, frame, ssh, + f"mkdir -p /srv/frameos/state && ln -s /srv/frameos/state {release_path}/state") + await exec_command(db, redis, frame, ssh, + f"sudo cp {release_path}/frameos.service /etc/systemd/system/frameos.service") + await exec_command(db, redis, frame, ssh, "sudo chown root:root /etc/systemd/system/frameos.service") + await exec_command(db, redis, frame, ssh, "sudo chmod 644 /etc/systemd/system/frameos.service") - if frame.reboot and frame.reboot.get('enabled') == 'true': - cron_schedule = frame.reboot.get('crontab', '0 0 * * *') - if frame.reboot.get('type') == 'raspberry': - crontab = f"{cron_schedule} root /sbin/shutdown -r now" - else: - crontab = f"{cron_schedule} root systemctl restart frameos.service" - await exec_command(db, redis, frame, ssh, - f"echo '{crontab}' | sudo tee /etc/cron.d/frameos-reboot") - else: - await exec_command(db, redis, frame, ssh, "sudo rm -f /etc/cron.d/frameos-reboot") + await exec_command(db, redis, frame, ssh, + f"rm -rf /srv/frameos/current && ln -s {release_path} /srv/frameos/current") - must_reboot = False - if drivers.get("bootconfig"): - for line in drivers["bootconfig"].lines: - if await exec_command(db, redis, frame, ssh, - f'grep -q "^{line}" ' + boot_config, raise_on_error=False) != 0: - await exec_command(db, redis, frame, ssh, command=f'echo "{line}" | sudo tee -a ' + boot_config, log_output=False) - must_reboot = True + # 11. Sync assets + await sync_assets(db, redis, frame, ssh) - await exec_command(db, redis, frame, ssh, "sudo systemctl daemon-reload") - await exec_command(db, redis, frame, ssh, "sudo systemctl enable frameos.service") + # 12. Additional config (SPI, I2C, apt timers, etc.) 
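+    # Note: handle_additional_device_config() also records drivers["bootconfig"].needs_reboot,
+    # which is checked below to decide whether the device must be rebooted.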
+ await handle_additional_device_config(db, redis, frame, ssh, arch, drivers) frame.status = 'starting' frame.last_successful_deploy = frame_dict frame.last_successful_deploy_at = datetime.now(timezone.utc) + # Possibly reboot if bootconfig lines changed + must_reboot = drivers.get("bootconfig") and drivers["bootconfig"].needs_reboot + await exec_command(db, redis, frame, ssh, "sudo systemctl daemon-reload") + await exec_command(db, redis, frame, ssh, "sudo systemctl enable frameos.service") + if must_reboot: await update_frame(db, redis, frame) await log(db, redis, int(frame.id), "stdinfo", "Deployed! Rebooting device after boot config changes") @@ -357,17 +402,148 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: await remove_ssh_connection(db, redis, ssh, frame) +# --------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------- + +async def install_if_necessary(db: Session, redis: Redis, frame: Frame, ssh, pkg: str, raise_on_error=True) -> int: + """ + Installs package `pkg` on the remote device if it's not already installed. + Return code is from `exec_command`. + """ + cmd = f"dpkg -l | grep -q \"^ii {pkg}\" || sudo apt-get install -y {pkg}" + return await exec_command(db, redis, frame, ssh, cmd, raise_on_error=raise_on_error) + + +def get_apt_dependencies_from_scenes(db: Session, redis: Redis, frame: Frame) -> set[str]: + """ + Examine each scene's config for 'apt' dependencies in config.json + and collect them all in a set. + """ + all_deps = set() + for scene in frame.scenes: + try: + for node in scene.get('nodes', []): + try: + config: Optional[dict[str, Any]] = None + if node.get('type') == 'app': + app = node.get('data', {}).get('keyword') + if app: + json_config = get_one_app_sources(app).get('config.json') + if json_config: + config = json.loads(json_config) + elif node.get('type') == 'source': + json_config = node.get('sources', {}).get('config.json') + if json_config: + config = json.loads(json_config) + if config and config.get('apt'): + for dep in config['apt']: + all_deps.add(dep) + except Exception: + pass + except Exception: + pass + return all_deps + + +def get_target_cpu(arch: str) -> str: + """ + Map 'uname -m' output to something Nim expects in --cpu + and that we can match with cross compilers. + """ + if arch in ("aarch64", "arm64"): + return "arm64" + elif arch in ("armv6l", "armv7l"): + return "arm" + elif arch == "i386": + return "i386" + # Fallback + return "amd64" + + +def get_cross_compiler_for_cpu(cpu: str) -> Optional[str]: + """ + Return the cross-compiler command for a given CPU, + or None if there's no well-known cross-compiler for that CPU. + For Pi Zero/1 (ARMv6) or Pi 2/3 (ARMv7) we guess 'arm-linux-gnueabihf-gcc'. + For 64-bit Pi: 'aarch64-linux-gnu-gcc'. + """ + if cpu == "arm64": + return "aarch64-linux-gnu-gcc" + elif cpu == "arm": + return "arm-linux-gnueabihf-gcc" + return None + + def find_nim_v2(): + """ + Locate a Nim executable >= 2.0.0. + Raises an exception if not found or if version < 2.0.0. 
+ """ nim_path = find_nim_executable() if not nim_path: raise Exception("Nim executable not found") - nim_version = get_nim_version(nim_path) - if not nim_version or nim_version < version.parse("2.0.0"): + nim_ver = get_nim_version(nim_path) + if not nim_ver or nim_ver < version.parse("2.0.0"): raise Exception("Nim 2.0.0 or higher is required") return nim_path +def find_nim_executable(): + common_paths = { + 'Windows': [ + 'C:\\Program Files\\Nim\\bin\\nim.exe', + 'C:\\Nim\\bin\\nim.exe' + ], + 'Darwin': [ + '/opt/homebrew/bin/nim', + '/usr/local/bin/nim' + ], + 'Linux': [ + '/usr/bin/nim', + '/usr/local/bin/nim', + '/opt/nim/bin/nim', + ] + } + # If nim is in the PATH + if is_executable_in_path('nim'): + return 'nim' + os_type = platform.system() + for path in common_paths.get(os_type, []): + if os.path.isfile(path) and os.access(path, os.X_OK): + return path + return None + + +def is_executable_in_path(executable: str): + try: + subprocess.run([executable, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return True + except FileNotFoundError: + return False + + +def get_nim_version(executable_path: str): + try: + result = subprocess.run([executable_path, '--version'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + text=True) + output = result.stdout.split('\n')[0] + parts = output.split() + for p in parts: + if re.match(r'^\d+(\.\d+){1,2}', p): + return version.parse(p) + return None + except Exception as e: + print(f"Error getting Nim version: {e}") + return None + + def create_build_folders(temp_dir, build_id): + """ + Create local build directories to store Nim source + build artifacts. + Returns (build_dir, source_dir). + """ build_dir = os.path.join(temp_dir, f"build_{build_id}") source_dir = os.path.join(temp_dir, "frameos") os.makedirs(source_dir, exist_ok=True) @@ -378,6 +554,10 @@ def create_build_folders(temp_dir, build_id): async def make_local_modifications(db: Session, redis: Redis, frame: Frame, source_dir: str): + """ + Write out scene, app, driver code into the Nim sources + according to the current frame config. + """ shutil.rmtree(os.path.join(source_dir, "src", "scenes"), ignore_errors=True) os.makedirs(os.path.join(source_dir, "src", "scenes"), exist_ok=True) @@ -414,6 +594,7 @@ async def make_local_modifications(db: Session, redis: Redis, if frame.debug: await log(db, redis, int(frame.id), "stdout", f"Generated drivers.nim:\n{source}") + # Waveshare driver code (if needed) if drivers.get("waveshare"): with open(os.path.join(source_dir, "src", "drivers", "waveshare", "driver.nim"), "w") as wf: source = write_waveshare_driver_nim(drivers) @@ -422,19 +603,6 @@ async def make_local_modifications(db: Session, redis: Redis, await log(db, redis, int(frame.id), "stdout", f"Generated waveshare driver:\n{source}") -def compile_line_md5(input_str: str) -> str: - words = [] - ignore_next = False - for word in input_str.split(' '): - if word == '-I': - ignore_next = True - elif ignore_next or word.startswith("-I"): - pass - else: - words.append(word) - return hashlib.md5(" ".join(words).encode()).hexdigest() - - async def create_local_build_archive( db: Session, redis: Redis, @@ -444,44 +612,54 @@ async def create_local_build_archive( nim_path: str, source_dir: str, temp_dir: str, - cpu: str -): + cpu: str, + pass_c_l_flags: str = "", + do_cross_compile: bool = False +) -> str: + """ + Run Nim to generate the C files (and Makefile scaffolding), + then create a tar.gz of the build directory. + Returns path to the .tar.gz. 
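+    `pass_c_l_flags` carries extra -march/-mfpu flags that are forwarded to Nim via
+    --passC/--passL; `do_cross_compile` adds the cross-architecture include/lib paths
+    to the generated Makefile.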
+ """ drivers = drivers_for_frame(frame) + # Copy vendor folder(s) if needed for e.g. Inky if inkyPython := drivers.get('inkyPython'): vendor_folder = inkyPython.vendor_folder or "" os.makedirs(os.path.join(build_dir, "vendor"), exist_ok=True) - shutil.copytree( - f"../frameos/vendor/{vendor_folder}/", - os.path.join(build_dir, "vendor", vendor_folder), - dirs_exist_ok=True - ) + local_from = f"../frameos/vendor/{vendor_folder}/" + shutil.copytree(local_from, + os.path.join(build_dir, "vendor", vendor_folder), + dirs_exist_ok=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "env"), ignore_errors=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "__pycache__"), ignore_errors=True) if inkyHyperPixel2r := drivers.get('inkyHyperPixel2r'): vendor_folder = inkyHyperPixel2r.vendor_folder or "" os.makedirs(os.path.join(build_dir, "vendor"), exist_ok=True) - shutil.copytree( - f"../frameos/vendor/{vendor_folder}/", - os.path.join(build_dir, "vendor", vendor_folder), - dirs_exist_ok=True - ) + local_from = f"../frameos/vendor/{vendor_folder}/" + shutil.copytree(local_from, + os.path.join(build_dir, "vendor", vendor_folder), + dirs_exist_ok=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "env"), ignore_errors=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "__pycache__"), ignore_errors=True) - await log(db, redis, int(frame.id), "stdout", - "- No cross compilation. Generating source code for compilation on frame.") + await log(db, redis, int(frame.id), "stdout", "- Generating Nim => C code for compilation.") debug_options = "--lineTrace:on" if frame.debug else "" + extra_passes = "" + if pass_c_l_flags: + extra_passes = f'--passC:"{pass_c_l_flags}" --passL:"{pass_c_l_flags}"' + cmd = ( f"cd {source_dir} && nimble assets -y && nimble setup && " f"{nim_path} compile --os:linux --cpu:{cpu} " f"--compileOnly --genScript --nimcache:{build_dir} " - f"{debug_options} src/frameos.nim 2>&1" + f"{debug_options} {extra_passes} src/frameos.nim 2>&1" ) status, out, err = await exec_local_command(db, redis, frame, cmd) if status != 0: + # Attempt to parse any relevant final line for error location lines = (out or "").split("\n") filtered = [ln for ln in lines if ln.strip()] if filtered: @@ -499,22 +677,23 @@ async def create_local_build_archive( all_lines = of.readlines() await log(db, redis, int(frame.id), "stdout", f"Error in {rel_fn}:{line_nr}:{column}") - await log(db, redis, int(frame.id), "stdout", - f"Line {line_nr}: {all_lines[line_nr - 1]}") - await log(db, redis, int(frame.id), "stdout", - f".......{'.'*(column - 1 + len(str(line_nr)))}^") + if 0 < line_nr <= len(all_lines): + line_text = all_lines[line_nr - 1] + await log(db, redis, int(frame.id), "stdout", f"Line {line_nr}: {line_text}") + caret_prefix = "......." + ('.' * (column - 1 + len(str(line_nr)))) + await log(db, redis, int(frame.id), "stdout", f"{caret_prefix}^") else: await log(db, redis, int(frame.id), "stdout", f"Error in {fn}:{line_nr}:{column}") - raise Exception("Failed to generate frameos sources") + # Copy nimbase.h into build_dir nimbase_path = find_nimbase_file(nim_path) if not nimbase_path: raise Exception("nimbase.h not found") - shutil.copy(nimbase_path, os.path.join(build_dir, "nimbase.h")) + # Waveshare variant? 
if waveshare := drivers.get('waveshare'): if waveshare.variant: variant_folder = get_variant_folder(waveshare.variant) @@ -524,8 +703,8 @@ async def create_local_build_archive( os.path.join(source_dir, "src", "drivers", "waveshare", variant_folder, uf), os.path.join(build_dir, uf) ) - - # color e-paper variants + # color e-paper variants need bc-based filenames + # e.g. EPD_2in9b -> EPD_2in9bc.(c/h) if waveshare.variant in [ "EPD_2in9b", "EPD_2in9c", "EPD_2in13b", "EPD_2in13c", "EPD_4in2b", "EPD_4in2c", "EPD_5in83b", "EPD_5in83c", @@ -534,100 +713,70 @@ async def create_local_build_archive( c_file = re.sub(r'[bc]', 'bc', waveshare.variant) variant_files = [f"{waveshare.variant}.nim", f"{c_file}.c", f"{c_file}.h"] else: - variant_files = [f"{waveshare.variant}.nim", f"{waveshare.variant}.c", f"{waveshare.variant}.h"] - + variant_files = [ + f"{waveshare.variant}.nim", + f"{waveshare.variant}.c", + f"{waveshare.variant}.h" + ] for vf in variant_files: shutil.copy( os.path.join(source_dir, "src", "drivers", "waveshare", variant_folder, vf), os.path.join(build_dir, vf) ) + # Generate the final Makefile with open(os.path.join(build_dir, "Makefile"), "w") as mk: script_path = os.path.join(build_dir, "compile_frameos.sh") linker_flags = ["-pthread", "-lm", "-lrt", "-ldl"] compiler_flags: list[str] = [] - with open(script_path, "r") as sc: - lines_sc = sc.readlines() - for line in lines_sc: - if " -o frameos " in line and " -l" in line: - linker_flags = [ - fl.strip() for fl in line.split(' ') - if fl.startswith('-') and fl != '-o' - ] - elif " -c " in line and not compiler_flags: - compiler_flags = [ - fl for fl in line.split(' ') - if fl.startswith('-') and not fl.startswith('-I') - and fl not in ['-o', '-c', '-D'] - ] - + if os.path.isfile(script_path): + with open(script_path, "r") as sc: + lines_sc = sc.readlines() + for line in lines_sc: + if " -o frameos " in line and " -l" in line: + # This line typically has -o frameos -lpthread -lm etc. + linker_flags = [ + fl.strip() for fl in line.split(' ') + if fl.startswith('-') and fl != '-o' + ] + elif " -c " in line and not compiler_flags: + # Nim's compile command for each .c + compiler_flags = [ + fl for fl in line.split(' ') + if fl.startswith('-') and not fl.startswith('-I') + and fl not in ['-o', '-c', '-D'] + ] + + if do_cross_compile: + if cpu == "arm": + linker_flags += ["-L/usr/lib/arm-linux-gnueabihf"] + compiler_flags += ["-I/usr/include/arm-linux-gnueabihf"] + elif cpu == "arm64": + linker_flags += ["-L/usr/lib/aarch64-linux-gnu"] + compiler_flags += ["-I/usr/include/aarch64-linux-gnu"] + + # Base Makefile template with open(os.path.join(source_dir, "tools", "nimc.Makefile"), "r") as mf_in: lines_make = mf_in.readlines() for ln in lines_make: if ln.startswith("LIBS = "): - ln = "LIBS = -L. " + " ".join(linker_flags) + "\n" + ln = ("LIBS = -L. 
" + " ".join(linker_flags) + "\n") if ln.startswith("CFLAGS = "): - ln = "CFLAGS = " + " ".join([f for f in compiler_flags if f != '-c']) + "\n" + cf = [f for f in compiler_flags if f != '-c'] + ln = "CFLAGS = " + " ".join(cf) + "\n" mk.write(ln) + # Make a tar of the entire build_dir archive_path = os.path.join(temp_dir, f"build_{build_id}.tar.gz") zip_base = os.path.join(temp_dir, f"build_{build_id}") shutil.make_archive(zip_base, 'gztar', temp_dir, f"build_{build_id}") return archive_path -def find_nim_executable(): - common_paths = { - 'Windows': [ - 'C:\\Program Files\\Nim\\bin\\nim.exe', - 'C:\\Nim\\bin\\nim.exe' - ], - 'Darwin': [ - '/opt/homebrew/bin/nim', - '/usr/local/bin/nim' - ], - 'Linux': [ - '/usr/bin/nim', - '/usr/local/bin/nim', - '/opt/nim/bin/nim', - ] - } - - if is_executable_in_path('nim'): - return 'nim' - - os_type = platform.system() - for path in common_paths.get(os_type, []): - if os.path.isfile(path) and os.access(path, os.X_OK): - return path - return None - - -def is_executable_in_path(executable: str): - try: - subprocess.run([executable, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - return True - except FileNotFoundError: - return False - - -def get_nim_version(executable_path: str): - try: - result = subprocess.run([executable_path, '--version'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - text=True) - output = result.stdout.split('\n')[0] - version_str = output.split()[3] - return version.parse(version_str) - except Exception as e: - print(f"Error getting Nim version: {e}") - return None - - def find_nimbase_file(nim_executable: str): nimbase_paths: list[str] = [] - try: + # Attempt nim dump to see if it reveals the Nim lib location nim_dump_output = subprocess.run( [nim_executable, "dump"], text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE @@ -645,11 +794,6 @@ def find_nimbase_file(nim_executable: str): elif os_type == 'Windows': nimbase_paths.append('C:\\Nim\\lib') - for path in nimbase_paths: - nb_file = os.path.join(path, 'nimbase.h') - if os.path.isfile(nb_file): - return nb_file - if os_type == 'Darwin': base_dir = '/opt/homebrew/Cellar/nim/' if os.path.exists(base_dir): @@ -657,4 +801,148 @@ def find_nimbase_file(nim_executable: str): nb_file = os.path.join(base_dir, verdir, 'nim', 'lib', 'nimbase.h') if os.path.isfile(nb_file): return nb_file + + for path in nimbase_paths: + nb_file = os.path.join(path, 'nimbase.h') + if os.path.isfile(nb_file): + return nb_file return None + + +def requires_vendor_upload(drivers: dict) -> bool: + """ + Returns True if we have inky drivers that require uploading Python code to the Pi. + """ + return any(k in drivers for k in ["inkyPython", "inkyHyperPixel2r"]) + + +def copy_vendor_folders(drivers: dict, vendor_folder_temp: str): + """ + Copies Inky or other vendor folders into a temp area for tar/transfer. 
+ """ + if inkyPython := drivers.get('inkyPython'): + vf = inkyPython.vendor_folder or "" + local_from = f"../frameos/vendor/{vf}/" + dest = os.path.join(vendor_folder_temp, vf) + shutil.copytree(local_from, dest, dirs_exist_ok=True) + # remove venv, __pycache__ to reduce size + shutil.rmtree(os.path.join(dest, "env"), ignore_errors=True) + shutil.rmtree(os.path.join(dest, "__pycache__"), ignore_errors=True) + + if inkyHyperPixel2r := drivers.get('inkyHyperPixel2r'): + vf = inkyHyperPixel2r.vendor_folder or "" + local_from = f"../frameos/vendor/{vf}/" + dest = os.path.join(vendor_folder_temp, vf) + shutil.copytree(local_from, dest, dirs_exist_ok=True) + shutil.rmtree(os.path.join(dest, "env"), ignore_errors=True) + shutil.rmtree(os.path.join(dest, "__pycache__"), ignore_errors=True) + + +async def install_inky_vendors(db: Session, redis: Redis, frame: Frame, ssh, build_id: str, drivers: dict): + """ + If the user wants inky/HyperPixel drivers, set up the Python venv on the Pi. + (We assume the vendor folder was either included in the on-device build tar + or scp'd separately if cross-compiled.) + """ + if inkyPython := drivers.get("inkyPython"): + await install_if_necessary(db, redis, frame, ssh, "python3-pip") + await install_if_necessary(db, redis, frame, ssh, "python3-venv") + cmd = ( + f"cd /srv/frameos/vendor/{inkyPython.vendor_folder} && " + "([ ! -d env ] && python3 -m venv env || echo 'env exists') && " + "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || " + "(echo '> env/bin/pip3 install -r requirements.txt' && " + "env/bin/pip3 install -r requirements.txt && " + "sha256sum requirements.txt > requirements.txt.sha256sum))" + ) + await exec_command(db, redis, frame, ssh, cmd) + + if inkyHyperPixel2r := drivers.get("inkyHyperPixel2r"): + await install_if_necessary(db, redis, frame, ssh, "python3-dev") + await install_if_necessary(db, redis, frame, ssh, "python3-pip") + await install_if_necessary(db, redis, frame, ssh, "python3-venv") + cmd = ( + f"cd /srv/frameos/vendor/{inkyHyperPixel2r.vendor_folder} && " + "([ ! -d env ] && python3 -m venv env || echo 'env exists') && " + "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || " + "(echo '> env/bin/pip3 install -r requirements.txt' && " + "env/bin/pip3 install -r requirements.txt && " + "sha256sum requirements.txt > requirements.txt.sha256sum))" + ) + await exec_command(db, redis, frame, ssh, cmd) + + +async def handle_additional_device_config(db: Session, redis: Redis, frame: Frame, ssh, arch: str, drivers: dict): + """ + E.g. enabling I2C, SPI, or messing with apt-daily timers for low memory devices, + plus appending lines to /boot/config.txt if needed. 
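+    Also records drivers["bootconfig"].needs_reboot so the caller can decide
+    whether a reboot is required.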
+ """ + mem_output: list[str] = [] + await exec_command(db, redis, frame, ssh, "free -m", mem_output, raise_on_error=False) + total_memory = 0 + try: + total_memory = int(mem_output[1].split()[1]) + except: + pass + low_memory = (total_memory < 512) + + boot_config = "/boot/config.txt" + if await exec_command(db, redis, frame, ssh, "test -f /boot/firmware/config.txt", raise_on_error=False) == 0: + boot_config = "/boot/firmware/config.txt" + + # i2c + if drivers.get("i2c"): + await exec_command(db, redis, frame, ssh, + f'grep -q "^dtparam=i2c_vc=on$" {boot_config} ' + f'|| echo "dtparam=i2c_vc=on" | sudo tee -a {boot_config}') + await exec_command(db, redis, frame, ssh, + 'command -v raspi-config > /dev/null && ' + 'sudo raspi-config nonint get_i2c | grep -q "1" && { ' + ' sudo raspi-config nonint do_i2c 0; echo "I2C enabled"; ' + '} || echo "I2C already enabled"') + + # spi + if drivers.get("spi"): + await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 0') + elif drivers.get("noSpi"): + await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 1') + + # Possibly disable apt timers on low memory + if low_memory: + await exec_command( + db, redis, frame, ssh, + "systemctl is-enabled apt-daily-upgrade.timer 2>/dev/null | grep -q masked || " + "(" + " sudo systemctl mask apt-daily-upgrade && " + " sudo systemctl mask apt-daily && " + " sudo systemctl disable apt-daily.service apt-daily.timer apt-daily-upgrade.timer apt-daily-upgrade.service" + ")" + ) + + # Reboot or auto-restart logic from frame.reboot + if frame.reboot and frame.reboot.get('enabled') == 'true': + cron_schedule = frame.reboot.get('crontab', '0 0 * * *') + if frame.reboot.get('type') == 'raspberry': + crontab = f"{cron_schedule} root /sbin/shutdown -r now" + else: + crontab = f"{cron_schedule} root systemctl restart frameos.service" + await exec_command(db, redis, frame, ssh, f"echo '{crontab}' | sudo tee /etc/cron.d/frameos-reboot") + else: + await exec_command(db, redis, frame, ssh, "sudo rm -f /etc/cron.d/frameos-reboot") + + # If we have lines to add to /boot/config.txt: + if drivers.get("bootconfig"): + lines = drivers["bootconfig"].lines + must_reboot = False + for line in lines: + cmd = f'grep -q "^{line}" {boot_config}' + if await exec_command(db, redis, frame, ssh, cmd, raise_on_error=False) != 0: + # not found in boot_config + await exec_command( + db, redis, frame, ssh, + f'echo "{line}" | sudo tee -a {boot_config}', + log_output=False + ) + must_reboot = True + # We store that in the driver dict so the main deploy logic can check: + drivers["bootconfig"].needs_reboot = must_reboot diff --git a/backend/app/utils/ssh_utils.py b/backend/app/utils/ssh_utils.py index 5280e72a..d9226244 100644 --- a/backend/app/utils/ssh_utils.py +++ b/backend/app/utils/ssh_utils.py @@ -319,7 +319,8 @@ async def exec_local_command(db: Session, redis: ArqRedis, frame: Frame, command output = process.stdout.readline() if not output: break - await log(db, redis, int(frame.id), "stdout", output) + if generate_log: + await log(db, redis, int(frame.id), "stdout", output) outputs.append(output) if process.stderr: @@ -327,7 +328,8 @@ async def exec_local_command(db: Session, redis: ArqRedis, frame: Frame, command error = process.stderr.readline() if not error: break - await log(db, redis, int(frame.id), "stderr", error) + if generate_log: + await log(db, redis, int(frame.id), "stderr", error) errors.append(error) if break_next: @@ -337,7 +339,7 @@ async def exec_local_command(db: Session, redis: ArqRedis, 
frame: Frame, command
         await asyncio.sleep(0.1)
 
     exit_status = process.returncode
-    if exit_status != 0:
+    if exit_status != 0 and generate_log:
         await log(db, redis, int(frame.id), "exit_status", f"The command exited with status {exit_status}")
 
     return (exit_status,
diff --git a/frameos/frameos.nimble b/frameos/frameos.nimble
index ac9979b6..1d45b4ea 100644
--- a/frameos/frameos.nimble
+++ b/frameos/frameos.nimble
@@ -20,6 +20,7 @@ requires "linuxfb >= 0.1.0"
 requires "psutil >= 0.6.0"
 requires "ws >= 0.5.0"
 requires "qrgen >= 3.1.0"
+requires "nimassets >= 0.2.4"
 
 taskRequires "assets", "nimassets >= 0.2.4"
 
diff --git a/frameos/nimble.lock b/frameos/nimble.lock
index f2697147..c29a117c 100644
--- a/frameos/nimble.lock
+++ b/frameos/nimble.lock
@@ -139,6 +139,28 @@
         "sha1": "8e639fafa952f3e9d0315f181aa05e0694603bfc"
       }
     },
+    "zstd": {
+      "version": "0.9.0",
+      "vcsRevision": "f8f80a57ff782f176b16de0b3885600523d39d80",
+      "url": "https://github.com/wltsmrz/nim_zstd",
+      "downloadMethod": "git",
+      "dependencies": [],
+      "checksums": {
+        "sha1": "20b23158e94f01ea0c4bf419a21b0feabe70bf31"
+      }
+    },
+    "nimassets": {
+      "version": "0.2.4",
+      "vcsRevision": "d06724dd7b80fb470542ab932f3a94af78fe2eb1",
+      "url": "https://github.com/xmonader/nimassets",
+      "downloadMethod": "git",
+      "dependencies": [
+        "zstd"
+      ],
+      "checksums": {
+        "sha1": "71d5510ad86a323fc0ad5dc6b774261e80fe0361"
+      }
+    },
     "zippy": {
       "version": "0.10.11",
       "vcsRevision": "9560f3d20479fb390c97f731ef8d100f1ed54e6c",
@@ -201,29 +223,6 @@
     }
   },
   "tasks": {
-    "assets": {
-      "zstd": {
-        "version": "0.9.0",
-        "vcsRevision": "f8f80a57ff782f176b16de0b3885600523d39d80",
-        "url": "https://github.com/wltsmrz/nim_zstd",
-        "downloadMethod": "git",
-        "dependencies": [],
-        "checksums": {
-          "sha1": "20b23158e94f01ea0c4bf419a21b0feabe70bf31"
-        }
-      },
-      "nimassets": {
-        "version": "0.2.4",
-        "vcsRevision": "d06724dd7b80fb470542ab932f3a94af78fe2eb1",
-        "url": "https://github.com/xmonader/nimassets",
-        "downloadMethod": "git",
-        "dependencies": [
-          "zstd"
-        ],
-        "checksums": {
-          "sha1": "71d5510ad86a323fc0ad5dc6b774261e80fe0361"
-        }
-      }
-    }
+    "assets": {}
   }
 }
diff --git a/frameos/tools/nimc.Makefile b/frameos/tools/nimc.Makefile
index fe8dd481..5464c0ad 100644
--- a/frameos/tools/nimc.Makefile
+++ b/frameos/tools/nimc.Makefile
@@ -5,8 +5,8 @@ SOURCES := $(shell ls -S *.c)
 OBJECTS = $(SOURCES:.c=.o)
 TOTAL = $(words $(SOURCES))
 EXECUTABLE = frameos
-LIBS = -pthread -lm -lm -lrt -ldl
-CFLAGS = -w -fmax-errors=3 -pthread -O3 -fno-strict-aliasing -fno-ident -fno-math-errno
+LIBS = -pthread -lm -lrt -ldl $(SYSROOT)
+CFLAGS = -w -fmax-errors=3 -pthread -O3 -fno-strict-aliasing -fno-ident -fno-math-errno -g
 
 all: $(EXECUTABLE)
 
@@ -19,7 +19,6 @@ clean:
 
 pre-build:
 	@mkdir -p ../cache
-	@echo "Compiling on device, largest files first. This might take minutes on the first run."
 
 $(OBJECTS): pre-build