-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathDockerfile.test
More file actions
54 lines (46 loc) · 1.52 KB
/
Dockerfile.test
File metadata and controls
54 lines (46 loc) · 1.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
# syntax=docker/dockerfile:1
# AluminatiAI — RunPod test image
#
# Builds the agent from source (no PyPI release needed) and bundles
# PyTorch so the demo workload can actually saturate the GPU.
#
# Build:
#   docker build -f Dockerfile.test -t aluminatai-test .
#
# Run on RunPod GPU Pod (pick any CUDA-capable template, then SSH in and run):
#   docker run --rm --gpus all --pid=host \
#     -e ALUMINATAI_API_KEY=alum_... \
#     -e ALUMINATAI_TEAM=demo \
#     -e DRY_RUN=0 \
#     -e METRICS_PORT=9100 \
#     -p 9100:9100 \
#     aluminatai-test python3 /app/tests/runpod_demo.py
#
# Or to just drop into a shell and poke around:
#   docker run --rm -it --gpus all --pid=host aluminatai-test bash
#
# NOTE(review): image runs as root. That is assumed intentional here (the demo
# is run with --pid=host to let NVML see host GPU processes) — confirm before
# promoting this Dockerfile beyond test use.

FROM nvidia/cuda:12.4.1-runtime-ubuntu22.04

# PYTHONUNBUFFERED: stream stdout/stderr immediately so pod log tailing works.
# PYTHONDONTWRITEBYTECODE: no .pyc clutter in the image.
# DEBIAN_FRONTEND is deliberately NOT set here — putting it in ENV would leak
# it into every running container; it is applied inline on the apt layer only.
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

# update + install combined in one layer (avoids stale-apt-cache bug);
# list cleanup in the same layer so it never ships in the image.
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        curl \
        python3-pip \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install PyTorch first — by far the largest layer and the least likely to
# change, so keep it above everything that churns. cu124 wheel index matches
# the base image's CUDA 12.4 runtime.
RUN pip3 install --no-cache-dir \
    torch --index-url https://download.pytorch.org/whl/cu124

# Install agent dependencies + prometheus support BEFORE copying the agent
# source, so editing agent code does not invalidate this layer. pyproject.toml
# is copied alongside as the declared-deps reference for the pinned list below.
COPY agent/pyproject.toml /app/
RUN pip3 install --no-cache-dir \
    "numpy>=1.24" \
    "nvidia-ml-py>=12.0.0" \
    "prometheus-client>=0.19" \
    "python-dotenv>=1.0" \
    "requests>=2.28" \
    "rich>=13.0"

# Agent source + test scripts last: a code change only rebuilds from here down.
COPY agent/ /app/
COPY agent/tests/ /app/tests/

# Documentation only (does not publish the port): the demo serves Prometheus
# metrics on METRICS_PORT, defaulting to 9100 per the run instructions above.
EXPOSE 9100

# Default: run the demo. Exec form — python3 is PID 1 and receives SIGTERM
# directly on `docker stop`.
CMD ["python3", "/app/tests/runpod_demo.py"]