Initial checkin

This commit is contained in:
Ryan Turner
2025-01-04 19:39:23 -06:00
parent 3392d2d5a8
commit df15fb54b0
3 changed files with 70 additions and 6 deletions

View File

@@ -7,18 +7,14 @@ RUN apt-get update && apt-get install -y gettext tzdata locales && rm -rf /var/l
# Enable the en_US.UTF-8 locale: uncomment it in locale.gen, regenerate
# locale data non-interactively, then set it as the system default.
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
dpkg-reconfigure --frontend=noninteractive locales && \
update-locale LANG=en_US.UTF-8
# NOTE(review): duplicate ENV LANG lines — this chunk is a rendered diff with
# the +/- markers stripped (old and new lines interleaved); in an actual
# Dockerfile the second assignment would simply override the first.
ENV LANG=en_US.UTF-8
ENV LANG="en_US.UTF-8"
# Container timezone; tzdata is installed by the apt-get layer referenced in
# the hunk header above.
ENV TZ="America/Los_Angeles"
WORKDIR /app
# NOTE(review): `COPY . /app` (old line) vs the requirements-first pattern
# below (new lines) — the new ordering keeps the pip-install layer cached
# until requirements.txt itself changes.
COPY . /app
# Copy the dependency manifest first for cache-friendly installs.
COPY requirements.txt .
RUN pip install -r requirements.txt
# Then copy the rest of the application source.
COPY . .
# Runtime configuration and the launch script.
COPY config.ini /app/config.ini
COPY entrypoint.sh /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh
# Exec-form ENTRYPOINT: the script runs as PID 1 and receives docker-stop signals.
ENTRYPOINT ["/app/entrypoint.sh"]

View File

@@ -0,0 +1,52 @@
# docker-compose stack for the meshing-around bot, with optional (commented
# out) ollama LLM backend and node-exporter metrics services.
# NOTE(review): YAML nesting indentation appears to have been stripped by the
# diff renderer — confirm structure against the repository copy before reuse.
services:
meshing-around:
build:
# Build context is one directory above this compose file (the repo root).
context: ..
# Optional dependency on the ollama service below; enable both together.
# depends_on:
# ollama:
# condition: service_healthy
devices:
- /dev/ttyAMA10 # Replace this with your actual device!
# Mounts the top-level `me_config` config (declared at the bottom of this
# file) into the container at the path the Dockerfile expects.
configs:
- source: me_config
target: /app/config.ini
extra_hosts:
- "host.docker.internal:host-gateway" # Used to access a local linux meshtasticd device via tcp
# Optional local LLM backend, pinned to a specific image tag.
# ollama:
# image: ollama/ollama:0.5.1
# volumes:
# - ./ollama:/root/.ollama
# - ./ollama-entrypoint.sh:/entrypoint.sh
# container_name: ollama
# pull_policy: always
# tty: true
# restart: always
# entrypoint:
# - /usr/bin/bash
# - /entrypoint.sh
# expose:
# - 11434
# Healthcheck passes once the llama3.2:3b model is present in the API's
# tag list (i.e. after the entrypoint script has pulled it).
# healthcheck:
# test: "apt update && apt install curl -y && curl -f http://localhost:11434/api/tags | grep -q llama3.2:3b"
# interval: 30s
# timeout: 10s
# retries: 20
# Optional Prometheus host-metrics exporter (read-only host mounts).
# node-exporter:
# image: quay.io/prometheus/node-exporter:latest
# volumes:
# - /proc:/host/proc:ro
# - /sys:/host/sys:ro
# - /:/rootfs:ro
# command:
# - --path.procfs=/host/proc
# - --path.rootfs=/rootfs
# - --path.sysfs=/host/sys
# - --collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)
# restart: unless-stopped
# expose:
# - 9100
# network_mode: host
# pid: host
# Top-level configs: `me_config` is sourced from the local config.ini and
# consumed by the meshing-around service above.
configs:
me_config:
file: ./config.ini

View File

@@ -0,0 +1,16 @@
#!/bin/bash
# Entrypoint for the ollama container: start the server in the background,
# pre-pull the model the bot expects, then block on the server process so
# the container stays alive (and exits with the server's status).
# NOTE(review): the diff hunk reports 16 added lines but only 12 are visible
# here — the tail of this script is outside this view.
# Start Ollama in the background.
/bin/ollama serve &
# Record Process ID.
pid=$!
# Pause for Ollama to start.
# NOTE(review): a fixed sleep is racy — polling the API until it responds
# would be more reliable; confirm 5s suffices on slow hosts.
sleep 5
echo "🔴 Retrieve llama3.2:3b model..."
ollama pull llama3.2:3b
echo "🟢 Done!"
# Wait for Ollama process to finish.
wait $pid