#!/bin/bash
# Launch llama-server in the background and forward SIGTERM/SIGINT to it so
# the server shuts down cleanly when the container/supervisor stops this script.
#
# Required environment variables (script aborts via `set -u` if unset):
#   BASE_TOP_MODEL    - model argument(s) passed verbatim to llama-server;
#                       intentionally unquoted so it may expand to multiple
#                       words (e.g. "-m /models/foo.gguf") — TODO confirm
#   BASE_CONTEXT_SIZE - context window size (-c)
#   BASE_GPU_LAYERS   - number of layers to offload to GPU (-ngl)
#   BASE_MAX_TOKENS   - max tokens to predict per request (-n)

set -u  # fail fast instead of launching with silently-empty arguments

# `exec` replaces the backgrounded subshell with the server process, so $!
# is the server's PID directly (no intermediate shell to signal through).
# shellcheck disable=SC2086 -- BASE_TOP_MODEL may expand to several args
exec /app/llama-server $BASE_TOP_MODEL \
  -c "$BASE_CONTEXT_SIZE" -ngl "$BASE_GPU_LAYERS" -n "$BASE_MAX_TOKENS" \
  --temp 0.5 --top-p 0.9 --top-k 40 --repeat-penalty 1.1 \
  --flash-attn auto --threads -1 --threads-batch -1 --threads-http -1 \
  --jinja \
  --timeout 900 --host 0.0.0.0 --port 8093 &
PID=$!

# Terminate the server, reap it, and exit. Errors are suppressed because the
# server may already have exited by the time the signal arrives.
cleanup() {
  echo "Stopping llama-server..."
  kill "$PID" 2>/dev/null
  wait "$PID" 2>/dev/null
  exit 0
}

trap cleanup TERM INT

# Block until the server exits; a TERM/INT interrupts this wait and runs cleanup.
wait "$PID"