Running Ollama models on GPU for Presenton
Pass the `--gpus=all` flag to `docker run` to expose all host GPUs to the container (requires the NVIDIA Container Toolkit).
# Run Presenton v0.3.0-beta with GPU access.
# NOTE: the original had "\ " mid-line, which shell parses as an escaped
# space (corrupting the next argument); continuations must end the line.
docker run -it --name presenton --gpus=all -p 5000:80 \
  -v "./app_data:/app_data" \
  ghcr.io/presenton/presenton:v0.3.0-beta
# Run Presenton (latest) with a local Ollama LLM and Pexels as image provider.
# Replace your_pexels_api_key with a real key; CAN_CHANGE_KEYS=false locks the
# configured providers in the UI.
# NOTE: the original had "\ " mid-line, which shell parses as an escaped
# space (corrupting the next argument); continuations must end the line.
docker run -it --name presenton --gpus=all -p 5000:80 \
  -e LLM="ollama" \
  -e OLLAMA_MODEL="llama3.2:3b" \
  -e IMAGE_PROVIDER="pexels" \
  -e PEXELS_API_KEY="your_pexels_api_key" \
  -e CAN_CHANGE_KEYS="false" \
  -v "./user_data:/app/user_data" \
  ghcr.io/presenton/presenton:latest