Hey
I think I am missing something. I will post my VPN container and my docker container..maybe you can see something I am missing?
#1 The VPN container does indeed work correctly, I have verified via curl ifconfig.io
#2 The Docker Containers do work, but keep using my real WAN IP
#3 I did create the docker network in question.
Code
VPN
services:
alpineqbit:
image: trigus42/qbittorrentvpn
container_name: alpineqbit
environment:
- PUID=1000 #UID of a user on your system
- PGID=100 #GID of the user group on your system
- VPN_TYPE=openvpn
- VPN_USERNAME=[name]
- VPN_PASSWORD=[password]
- WEBUI_PASSWORD=[password]
volumes:
- /srv/dev-disk-by-uuid-49fe686e-e61f-451b-94f3-f11f9df5625c/Server/downloads:/downloads
- /srv/dev-disk-by-uuid-49fe686e-e61f-451b-94f3-f11f9df5625c/Server:/server
- /srv/dev-disk-by-uuid-49fe686e-e61f-451b-94f3-f11f9df5625c/containers/alpineqbit/config:/config
ports:
- 8080:8080
restart: unless-stopped
cap_add:
- NET_ADMIN
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.all.disable_ipv6=0
devices:
- /dev/net/tun
networks:
- my-vpn-network
networks:
my-vpn-network:
driver: bridge
driver_opts:
com.docker.network.mtu: 1400
Display More
Code
Container
services:
openwebui:
image: ghcr.io/open-webui/open-webui:main
# If you have GPU support configured, use the :cuda tag instead:
# image: ghcr.io/open-webui/open-webui:cuda
networks:
- my-vpn-network
container_name: openwebui
restart: unless-stopped
dns:
- 1.1.1.1
ports:
- "3000:8080"
environment:
# This tells Open WebUI where to find the Ollama service using its service name (ollama)
OLLAMA_BASE_URL: http://ollama:11434
USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36"
# Optional: Set a secret key for security in production environments
WEBUI_SECRET_KEY: [key]
volumes:
# Persists the Open WebUI data (database, settings)
- open-webui-data:/app/backend/data
depends_on:
- ollama
ollama:
image: ollama/ollama
networks:
- my-vpn-network
container_name: ollama
restart: unless-stopped
# Use the 'deploy' section with 'gpus: all' for Nvidia GPU support
# Remove this section for CPU-only use
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
volumes:
# Persists the models you download
- ollama-data:/root/.ollama
ports:
- "11434:11434" # Exposes the Ollama API port
dns:
- 1.1.1.1
# Define the volumes for persistence
volumes:
open-webui-data:
ollama-data:
networks:
my-vpn-network:
external: true
Display More