# Docker Compose for the LLM safety-evaluation project:
#   eval-llm   — one-shot batch evaluation run (writes results.json)
#   gradio-app — interactive Gradio UI on host port 7861
# Both services build from the local Dockerfile and reserve all available
# NVIDIA GPUs (requires the NVIDIA container toolkit on the host).
services:
  eval-llm:
    build: .
    container_name: eval-llm-safety
    environment:
      - HF_TOKEN=${HF_TOKEN}  # Pass Hugging Face token from .env
    volumes:
      - ./src:/app  # Mount the current directory for code changes
    # Folded block scalar (>-): readable multi-line source, single-line
    # command at runtime — token-for-token identical to the original.
    command: >-
      python /app/eval_llm.py
      --model_name "HuggingFaceTB/SmolLM2-135M-Instruct"
      --dataset_name "declare-lab/HarmfulQA"
      --num_samples 10
      --temperature 0.5
      --seed 42
      --output_file results.json
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  gradio-app:
    build: .
    container_name: eval-llm-safety-gradio
    environment:
      - HF_TOKEN=${HF_TOKEN}  # Pass Hugging Face token from .env
    volumes:
      - .:/app  # Mount the entire project directory
    ports:
      - "7861:7860"  # Expose Gradio port on 7861 to avoid conflict (quoted to dodge YAML sexagesimal parsing)
    entrypoint: ["python3", "-u", "/app/src/app.py"]
    # add access to GPU if available
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]