#!/bin/bash
# Descriptive reports: temp 0.6 is fine; 0.55 is more stable
TEMP=${BASE_TEMP:-0.6}

# Start llama-server in the background so the trap below can shut it down cleanly.
# BASE_MEDIUM_MODEL is expected to expand to the model argument (e.g. "-m /models/<model>.gguf").
/app/llama-server $BASE_MEDIUM_MODEL \
  -c $BASE_CONTEXT_SIZE -ngl $BASE_GPU_LAYERS -n $BASE_MAX_TOKENS \
  --temp "$TEMP" --top-p 0.9 --top-k 40 --repeat-penalty 1.1 \
  --flash-attn auto --threads -1 --threads-batch -1 --threads-http -1 \
  --jinja \
  --timeout 600 --host 0.0.0.0 --port 8092 &
PID=$!

# Forward container stop signals (SIGTERM/SIGINT) to the server and exit cleanly.
cleanup() {
  echo "Stopping llama-server..."
  kill "$PID" 2>/dev/null
  wait "$PID" 2>/dev/null
  exit 0
}
trap cleanup SIGTERM SIGINT

wait "$PID"
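
# --- Optional smoke test (sketch, not part of the startup flow) ---
# Once the server is listening on port 8092, it can be checked from another
# shell: llama-server exposes a /health endpoint and an OpenAI-compatible
# chat completions API. The payload fields shown here are illustrative only.
#
#   curl -s http://localhost:8092/health
#   curl -s http://localhost:8092/v1/chat/completions \
#     -H 'Content-Type: application/json' \
#     -d '{"messages":[{"role":"user","content":"ping"}],"max_tokens":8}'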