Compare commits

..

No commits in common. "master" and "v1.0" have entirely different histories.
master ... v1.0

17 changed files with 415 additions and 265 deletions

2
.dockerignore Normal file
View file

@ -0,0 +1,2 @@
.git
README.md

View file

@ -1,46 +0,0 @@
# Build and push multi-arch images to GHCR whenever a semver tag is pushed.
name: Build
on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      # QEMU + Buildx are required for the multi-platform build below.
      - uses: docker/setup-qemu-action@v2
      - uses: docker/setup-buildx-action@v2
      - uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # Generate image tags from the git tag (no bare major tag for 0.x).
      - id: tags
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/wfg/openvpn-client
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
      # Derive build-args: build date and the version (tag without leading "v").
      - id: build-args
        run: |
          ref=${{ github.ref }}
          vpatch=${ref##refs/*/}
          patch=${vpatch#v}
          # "::set-output" is deprecated; append to $GITHUB_OUTPUT instead.
          echo "date=$(date --utc --iso-8601=seconds)" >> "$GITHUB_OUTPUT"
          echo "version=$patch" >> "$GITHUB_OUTPUT"
      - uses: docker/build-push-action@v3
        with:
          context: "{{defaultContext}}:build"
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
          build-args: |
            BUILD_DATE=${{ steps.build-args.outputs.date }}
            IMAGE_VERSION=${{ steps.build-args.outputs.version }}
          tags: ${{ steps.tags.outputs.tags }}
          push: true

36
.github/workflows/publish.yml vendored Normal file
View file

@ -0,0 +1,36 @@
# Publish a tagged image plus "latest" to GHCR on every v* tag push.
name: Publish
on:
  push:
    tags:
      - v*
env:
  IMAGE_NAME: openvpn-client
jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v2
      - name: Get the version
        id: get_version
        # Strip "refs/tags/v" to get the bare version. "::set-output" is
        # deprecated; write step outputs to $GITHUB_OUTPUT instead.
        run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> "$GITHUB_OUTPUT"
      - name: Log in to registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v2
        with:
          tags: |
            ghcr.io/wfg/openvpn-client:${{ steps.get_version.outputs.VERSION }}
            ghcr.io/wfg/openvpn-client:latest
          push: true

2
.gitignore vendored
View file

@ -1,2 +0,0 @@
# Anything used during development should be put in local/ to prevent accidental committing.
local/

41
Dockerfile Normal file
View file

@ -0,0 +1,41 @@
# Stage 1: compile the Dante SOCKS server from source (no Alpine package).
FROM alpine:3.13 AS build
ARG DANTE_VERSION=1.4.2
RUN apk add --no-cache build-base
# ac_cv_func_sched_setscheduler=no forces the configure probe off — presumably
# a musl-libc workaround; TODO confirm against Dante build notes.
RUN wget https://www.inet.no/dante/files/dante-$DANTE_VERSION.tar.gz --output-document - | tar -xz \
&& cd dante-$DANTE_VERSION \
&& ac_cv_func_sched_setscheduler=no ./configure --disable-client \
&& make install
# Stage 2: runtime image with OpenVPN, Tinyproxy, and the sockd binary above.
FROM alpine:3.13
ARG IMAGE_VERSION
ARG BUILD_DATE
LABEL source="github.com/wfg/docker-openvpn-client"
LABEL version="$IMAGE_VERSION"
LABEL created="$BUILD_DATE"
COPY --from=build /usr/local/sbin/sockd /usr/local/sbin/sockd
# Defaults consumed by data/scripts/entry.sh; override at `docker run` time.
ENV KILL_SWITCH=on \
VPN_LOG_LEVEL=3 \
HTTP_PROXY=off \
SOCKS_PROXY=off
RUN apk add --no-cache \
bind-tools \
openvpn \
tinyproxy
# Unprivileged account referenced by user.unprivileged in data/sockd.conf.
RUN mkdir -p /data/vpn \
&& addgroup -S socks \
&& adduser -S -D -G socks -g "socks" -H -h /dev/null socks
COPY data/ /data
# Container is healthy only while 1.1.1.1 is reachable (i.e. tunnel is up
# or the kill switch is off).
HEALTHCHECK CMD ping -c 3 1.1.1.1 || exit 1
ENTRYPOINT ["/data/scripts/entry.sh"]

21
LICENSE
View file

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021 Wyatt Gill
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

124
README.md
View file

@ -1,126 +1,102 @@
# OpenVPN Client for Docker
Archived in favor of [a WireGuard version](https://github.com/wfg/docker-wireguard).
## What is this and what does it do?
[`ghcr.io/wfg/openvpn-client`](https://github.com/users/wfg/packages/container/package/openvpn-client) is a containerized OpenVPN client.
It has a kill switch built with `iptables` that kills Internet connectivity to the container if the VPN tunnel goes down for any reason.
[`ghcr.io/wfg/openvpn-client`](https://github.com/users/wfg/packages/container/package/openvpn-client) is a containerized OpenVPN client. It has a kill switch built with `iptables` that kills Internet connectivity to the container if the VPN tunnel goes down for any reason. It also includes an HTTP proxy server ([Tinyproxy](https://tinyproxy.github.io/)) and a SOCKS proxy server ([Dante](https://www.inet.no/dante/index.html)). This allows hosts and non-containerized applications to use the VPN without having to run VPN clients on those hosts.
This image requires you to supply the necessary OpenVPN configuration file(s).
Because of this, any VPN provider should work.
If you find something that doesn't work or have an idea for a new feature, issues and **pull requests are welcome** (however, I'm not promising they will be merged).
This image requires you to supply the necessary OpenVPN configuration file(s). Because of this, any VPN provider should work (however, if you find something that doesn't, please open an issue for it).
## Why?
Having a containerized VPN client lets you use container networking to easily choose which applications you want using the VPN instead of having to set up split tunnelling.
It also keeps you from having to install an OpenVPN client on the underlying host.
Having a containerized VPN client lets you use container networking to easily choose which applications you want using the VPN instead of having to set up split tunnelling. It also keeps you from having to install an OpenVPN client on the underlying host.
The idea for this image came from a similar project by [qdm12](https://github.com/qdm12) that has since evolved into something bigger and more complex than I wanted to use. I decided to dissect it and take it in my own direction. I plan to keep everything here well-documented because I want this to be a learning experience for both me and hopefully anyone else that uses it.
## How do I use it?
### Getting the image
You can either pull it from GitHub Container Registry or build it yourself.
To pull it from GitHub Container Registry, run
```
docker pull ghcr.io/wfg/openvpn-client
```
To pull from GitHub Container Registry, run `docker pull ghcr.io/wfg/openvpn-client`.
To build it yourself, run
```
docker build -t ghcr.io/wfg/openvpn-client https://github.com/wfg/docker-openvpn-client.git#:build
To build it yourself, do the following:
```bash
git clone https://github.com/wfg/docker-openvpn-client.git
cd docker-openvpn-client
docker build -t ghcr.io/wfg/openvpn-client .
```
### Creating and running a container
The image requires the container be created with the `NET_ADMIN` capability and `/dev/net/tun` accessible.
Below are bare-bones examples for `docker run` and Compose; however, you'll probably want to do more than just run the VPN client.
See the below to learn how to have [other containers use `openvpn-client`'s network stack](#using-with-other-containers).
The image requires the container be created with the `NET_ADMIN` capability and `/dev/net/tun` accessible. Below are bare-bones examples for `docker run` and Compose; however, you'll probably want to do more than just run the VPN client. See the sections below to learn how to use the [proxies](#http_proxy-and-socks_proxy) and have [other containers use `openvpn-client`'s network stack](#using-with-other-containers).
#### `docker run`
```
docker run --detach \
```bash
docker run -d \
--name=openvpn-client \
--cap-add=NET_ADMIN \
--device=/dev/net/tun \
--volume <path/to/config/dir>:/config \
-v <path/to/config>:/data/vpn \
ghcr.io/wfg/openvpn-client
```
#### `docker-compose`
```yaml
version: '2'
services:
openvpn-client:
image: ghcr.io/wfg/openvpn-client
container_name: openvpn-client
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun
volumes:
- <path/to/config/dir>:/config
restart: unless-stopped
openvpn-client:
image: ghcr.io/wfg/openvpn-client
container_name: openvpn-client
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun
volumes:
- <path/to/config>:/data/vpn
restart: unless-stopped
```
#### Environment variables
| Variable | Default (blank is unset) | Description |
| --- | --- | --- |
| `ALLOWED_SUBNETS` | | A list of one or more comma-separated subnets (e.g. `192.168.0.0/24,192.168.1.0/24`) to allow outside of the VPN tunnel. |
| `AUTH_SECRET` | | Docker secret that contains the credentials for accessing the VPN. |
| `CONFIG_FILE` | | The OpenVPN configuration file or search pattern. If unset, a random `.conf` or `.ovpn` file will be selected. |
| `KILL_SWITCH` | `on` | Whether or not to enable the kill switch. Set to any "truthy" value[1] to enable. |
[1] "Truthy" values in this context are the following: `true`, `t`, `yes`, `y`, `1`, `on`, `enable`, or `enabled`.
| `SUBNETS` | | A list of one or more comma-separated subnets (e.g. `192.168.0.0/24,192.168.1.0/24`) to allow outside of the VPN tunnel. See important note about this [below](#subnets). |
| `VPN_LOG_LEVEL` | `3` | OpenVPN verbosity (`1`-`11`) |
| `HTTP_PROXY` | `off` | The on/off status of Tinyproxy, the built-in HTTP proxy server. To enable, set to `on`. Any other value (including unset) will cause the proxy server to not start. It listens on port 8080. |
| `SOCKS_PROXY` | `off` | The on/off status of Dante, the built-in SOCKS proxy server. To enable, set to `on`. Any other value (including unset) will cause the proxy server to not start. It listens on port 1080. |
| `PROXY_USERNAME` | | Credentials for accessing the proxies. If `PROXY_USERNAME` is specified, you must also specify `PROXY_PASSWORD`. |
| `PROXY_PASSWORD` | | Credentials for accessing the proxies. If `PROXY_PASSWORD` is specified, you must also specify `PROXY_USERNAME`. |
##### Environment variable considerations
###### `ALLOWED_SUBNETS`
If you intend on connecting to containers that use the OpenVPN container's network stack (which you probably do), **you will probably want to use this variable**.
Regardless of whether or not you're using the kill switch, the entrypoint script also adds routes to each of the `ALLOWED_SUBNETS` to allow network connectivity from outside of Docker.
###### `SUBNETS`
**Important**: The DNS server used by this container prior to VPN connection must be included in the value specified. For example, if your container is using 192.168.1.1 as a DNS server, then this address or an appropriate CIDR block must be included in `SUBNETS`. This is necessary because the kill switch blocks traffic outside of the VPN tunnel before it's actually established. If the DNS server is not allowed, the server addresses in the VPN configuration will not resolve.
##### `AUTH_SECRET`
Compose has support for [Docker secrets](https://docs.docker.com/engine/swarm/secrets/#use-secrets-in-compose).
See the [Compose file](docker-compose.yml) in this repository for example usage of passing proxy credentials as Docker secrets.
The subnets specified will be allowed through the firewall which allows for connectivity to and from hosts on the subnets.
###### `HTTP_PROXY` and `SOCKS_PROXY`
If enabling the proxy server(s), you'll want to publish the appropriate port(s) in order to access the server(s). To do that using `docker run`, add `-p <host_port>:8080` and/or `-p <host_port>:1080` where `<host_port>` is whatever port you want to use on the host. If you're using `docker-compose`, add the relevant port specification(s) from the snippet below to the `openvpn-client` service definition in your Compose file.
```yaml
ports:
- <host_port>:8080
- <host_port>:1080
```
### Using with other containers
Once you have your `openvpn-client` container up and running, you can tell other containers to use `openvpn-client`'s network stack which gives them the ability to utilize the VPN tunnel.
There are a few ways to accomplish this depending on how your container is created.
Once you have your `openvpn-client` container up and running, you can tell other containers to use `openvpn-client`'s network stack which gives them the ability to utilize the VPN tunnel. There are a few ways to accomplish this depending on how your container is created.
If your container is being created with
1. the same Compose YAML file as `openvpn-client`, add `network_mode: service:openvpn-client` to the container's service definition.
2. a different Compose YAML file than `openvpn-client`, add `network_mode: container:openvpn-client` to the container's service definition.
3. `docker run`, add `--network=container:openvpn-client` as an option to `docker run`.
Once running and provided your container has `wget` or `curl`, you can run `docker exec <container_name> wget -qO - ifconfig.me` or `docker exec <container_name> curl -s ifconfig.me` to get the public IP of the container and make sure everything is working as expected.
This IP should match the one of `openvpn-client`.
Once running and provided your container has `wget` or `curl`, you can run `docker exec <container_name> wget -qO - ifconfig.me` or `docker exec <container_name> curl -s ifconfig.me` to get the public IP of the container and make sure everything is working as expected. This IP should match the one of `openvpn-client`.
#### Handling ports intended for connected containers
If you have a connected container and you need to access a port on that container, you'll want to publish that port on the `openvpn-client` container instead of the connected container.
To do that, add `-p <host_port>:<container_port>` if you're using `docker run`, or add the below snippet to the `openvpn-client` service definition in your Compose file if using `docker-compose`.
If you have a connected container and you need to access a port on that container, you'll want to publish that port on the `openvpn-client` container instead of the connected container. To do that, add `-p <host_port>:<container_port>` if you're using `docker run`, or add the below snippet to the `openvpn-client` service definition in your Compose file if using `docker-compose`.
```yaml
ports:
- <host_port>:<container_port>
- <host_port>:<container_port>
```
In both cases, replace `<host_port>` and `<container_port>` with the port used by your connected container.
### Verifying functionality
Once you have a container running `ghcr.io/wfg/openvpn-client`, run the following command to spin up a temporary container using `openvpn-client` for networking.
The `wget -qO - ifconfig.me` bit will return the public IP of the container (and anything else using `openvpn-client` for networking).
You should see an IP address owned by your VPN provider.
```
Once you have a container running `ghcr.io/wfg/openvpn-client`, run the following command to spin up a temporary container using `openvpn-client` for networking. The `wget -qO - ifconfig.me` bit will return the public IP of the container (and anything else using `openvpn-client` for networking). You should see an IP address owned by your VPN provider.
```bash
docker run --rm -it --network=container:openvpn-client alpine wget -qO - ifconfig.me
```
### Troubleshooting
#### VPN authentication
Your OpenVPN configuration file may not come with authentication baked in.
To provide OpenVPN the necessary credentials, create a file (any name will work, but this example will use `credentials.txt`) next to the OpenVPN configuration file with your username on the first line and your password on the second line.
For example:
```
vpn_username
vpn_password
```
In the OpenVPN configuration file, add the following line:
```
auth-user-pass credentials.txt
```
This will tell OpenVPN to read `credentials.txt` whenever it needs credentials.

View file

@ -1 +0,0 @@
Dockerfile

View file

@ -1,14 +0,0 @@
FROM alpine:3.17
RUN apk add --no-cache \
bash \
bind-tools \
iptables \
ip6tables \
openvpn
COPY . /usr/local/bin
ENV KILL_SWITCH=on
ENTRYPOINT [ "entry.sh" ]

View file

@ -1,50 +0,0 @@
#!/usr/bin/env bash
# Entrypoint: select an OpenVPN configuration file, optionally arm the kill
# switch and Docker-secret authentication, then run OpenVPN until signalled.
set -o errexit
set -o nounset
set -o pipefail

# Forward SIGTERM to the OpenVPN child so the container stops cleanly.
cleanup() {
    # BUG FIX: the signal must be passed as an option ("-TERM"); the original
    # "kill TERM" treats the word TERM as a PID and fails.
    kill -TERM "$openvpn_pid"
    exit 0
}

# True when $1 is a common "truthy" string (case-insensitive).
is_enabled() {
    [[ ${1,,} =~ ^(true|t|yes|y|1|on|enable|enabled)$ ]]
}

# CONFIG_FILE is either a specific file name or a find(1) pattern; with
# several matches, one is chosen at random (shuf -n 1).
if [[ $CONFIG_FILE ]]; then
    config_file=$(find /config -name "$CONFIG_FILE" 2> /dev/null | sort | shuf -n 1)
else
    config_file=$(find /config -name '*.conf' -o -name '*.ovpn' 2> /dev/null | sort | shuf -n 1)
fi

if [[ -z $config_file ]]; then
    echo "no openvpn configuration file found" >&2
    exit 1
fi

echo "using openvpn configuration file: $config_file"

openvpn_args=(
    "--config" "$config_file"
    "--cd" "/config"
)

# When the kill switch is enabled, OpenVPN runs killswitch.sh after the
# route is up, passing ALLOWED_SUBNETS as its first argument.
if is_enabled "$KILL_SWITCH"; then
    openvpn_args+=("--route-up" "/usr/local/bin/killswitch.sh $ALLOWED_SUBNETS")
fi

# Docker secret that contains the credentials for accessing the VPN.
if [[ $AUTH_SECRET ]]; then
    openvpn_args+=("--auth-user-pass" "/run/secrets/$AUTH_SECRET")
fi

openvpn "${openvpn_args[@]}" &
openvpn_pid=$!

trap cleanup TERM

wait "$openvpn_pid"

View file

@ -1,44 +0,0 @@
#!/usr/bin/env bash
# killswitch.sh — invoked by OpenVPN via --route-up (see entry.sh). Installs
# an iptables rule that rejects all non-tunnel traffic, then punches holes
# for the allowed subnets ($1) and for the VPN server addresses themselves.
set -o errexit
set -o nounset
set -o pipefail
# Reject anything not leaving via tun0, except local-type destinations and
# this container's own eth0 subnet.
iptables --insert OUTPUT \
! --out-interface tun0 \
--match addrtype ! --dst-type LOCAL \
! --destination "$(ip -4 -oneline addr show dev eth0 | awk 'NR == 1 { print $4 }')" \
--jump REJECT
# Create static routes for any ALLOWED_SUBNETS and punch holes in the firewall
# (ALLOWED_SUBNETS is passed as $1 from entry.sh)
default_gateway=$(ip -4 route | awk '$1 == "default" { print $3 }')
for subnet in ${1//,/ }; do
ip route add "$subnet" via "$default_gateway"
iptables --insert OUTPUT --destination "$subnet" --jump ACCEPT
done
# Punch holes in the firewall for the OpenVPN server addresses
# $config is set by OpenVPN:
# "Name of first --config file. Set on program initiation and reset on SIGHUP."
global_port=$(awk '$1 == "port" { print $2 }' "${config:?"config file not found by kill switch"}")
global_protocol=$(awk '$1 == "proto" { print $2 }' "${config:?"config file not found by kill switch"}")
remotes=$(awk '$1 == "remote" { print $2, $3, $4 }' "${config:?"config file not found by kill switch"}")
# Matches a dotted-quad IPv4 address with each octet in 0-255.
ip_regex='^(([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))\.){3}([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))$'
while IFS= read -r line; do
# Read a comment-stripped version of the line
# Fixes #84
IFS=" " read -ra remote <<< "${line%%\#*}"
address=${remote[0]}
# Per-remote port/proto take precedence; fall back to the config-level
# values, then to the OpenVPN defaults (1194/udp).
port=${remote[1]:-${global_port:-1194}}
protocol=${remote[2]:-${global_protocol:-udp}}
if [[ $address =~ $ip_regex ]]; then
iptables --insert OUTPUT --destination "$address" --protocol "$protocol" --destination-port "$port" --jump ACCEPT
else
# Hostname remote: allow every resolved address, and pin each one in
# /etc/hosts so a later re-resolution cannot return an IP the firewall
# does not allow.
for ip in $(dig -4 +short "$address"); do
iptables --insert OUTPUT --destination "$ip" --protocol "$protocol" --destination-port "$port" --jump ACCEPT
echo "$ip $address" >> /etc/hosts
done
fi
done <<< "$remotes"

11
data/scripts/dante_wrapper.sh Executable file
View file

@ -0,0 +1,11 @@
#!/bin/ash
# shellcheck shell=ash
# shellcheck disable=SC2169 # making up for lack of ash support
# Wait for network connectivity, then start the Dante SOCKS proxy server.
echo -e "Running Dante SOCKS proxy server.\n"
# Spin until 1.1.1.1 answers pings (i.e. the tunnel can reach the Internet).
while ! ping -c 3 1.1.1.1 > /dev/null 2>&1; do
    sleep 1
done
sockd -f /data/sockd.conf

173
data/scripts/entry.sh Executable file
View file

@ -0,0 +1,173 @@
#!/bin/ash
# shellcheck shell=ash
# shellcheck disable=SC2169 # making up for lack of ash support
# entry.sh — container entrypoint. Selects an OpenVPN configuration file,
# optionally installs an iptables kill switch, starts the HTTP (Tinyproxy)
# and/or SOCKS (Dante) proxy wrappers, then runs the OpenVPN client until
# SIGINT/SIGTERM is received.
cleanup() {
# When you run `docker stop` or any equivalent, a SIGTERM signal is sent to PID 1.
# A process running as PID 1 inside a container is treated specially by Linux:
# it ignores any signal with the default action. As a result, the process will
# not terminate on SIGINT or SIGTERM unless it is coded to do so. Because of this,
# I've defined behavior for when SIGINT and SIGTERM is received.
if [ "$openvpn_child" ]; then
echo "Stopping OpenVPN..."
kill -TERM "$openvpn_child"
fi
sleep 1
# Remove the modified copy; the original mounted config is left untouched.
rm "$config_file_modified"
echo "Exiting."
exit 0
}
# True when $1 contains a dotted-quad of digits (octets are not range-checked).
is_ip() {
echo "$1" | grep -Eq "[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*"
}
# Capture the filename of the first .conf file to use as the OpenVPN config.
config_file_original=$(find /data/vpn -name "*.conf" 2> /dev/null | sort | head -1)
if [ -z "$config_file_original" ]; then
>&2 echo "ERROR: No configuration file found. Please check your mount and file permissions. Exiting."
exit 1
fi
# Validate VPN_LOG_LEVEL (OpenVPN --verb accepts 1-11); fall back to 3.
# shellcheck disable=SC2153
if ! (echo "$VPN_LOG_LEVEL" | grep -Eq '^([1-9]|1[0-1])$'); then
echo "WARNING: Invalid log level $VPN_LOG_LEVEL. Setting to default."
vpn_log_level=3
else
vpn_log_level=$VPN_LOG_LEVEL
fi
echo "
---- Running with the following variables ----
Kill switch: ${KILL_SWITCH:-off}
HTTP proxy: ${HTTP_PROXY:-off}
SOCKS proxy: ${SOCKS_PROXY:-off}
Allowing subnets: ${SUBNETS:-none}
Using configuration file: $config_file_original
Using OpenVPN log level: $vpn_log_level
"
# Create a new configuration file to modify so the original is left untouched.
config_file_modified="${config_file_original}.modified"
echo "Creating $config_file_modified and making required changes to that file."
cp "$config_file_original" "$config_file_modified"
# These configuration file changes are required by Alpine.
sed -i \
-e '/up /c up \/etc\/openvpn\/up.sh' \
-e '/down /c down \/etc\/openvpn\/down.sh' \
-e 's/^proto udp$/proto udp4/' \
-e 's/^proto tcp$/proto tcp4/' \
"$config_file_modified"
echo -e "Changes made.\n"
trap cleanup INT TERM
# NOTE: When testing with the kill switch enabled, don't forget to pass in the
# local subnet. It will save a lot of headache.
if [ "$KILL_SWITCH" = "on" ]; then
local_subnet=$(ip r | grep -v 'default via' | grep eth0 | tail -n 1 | cut -d " " -f 1)
default_gateway=$(ip r | grep 'default via' | cut -d " " -f 3)
echo "Creating VPN kill switch and local routes."
echo "Allowing established and related connections..."
iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
echo "Allowing loopback connections..."
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
echo "Allowing Docker network connections..."
iptables -A INPUT -s "$local_subnet" -j ACCEPT
iptables -A OUTPUT -d "$local_subnet" -j ACCEPT
echo "Allowing specified subnets..."
# for every specified subnet...
for subnet in ${SUBNETS//,/ }; do
# create a route to it and...
ip route add "$subnet" via "$default_gateway" dev eth0
# allow connections
iptables -A INPUT -s "$subnet" -j ACCEPT
iptables -A OUTPUT -d "$subnet" -j ACCEPT
done
echo "Allowing remote servers in configuration file..."
# Config-level port/proto act as fallbacks for remotes that omit them.
remote_port=$(grep "port " "$config_file_modified" | cut -d " " -f 2)
remote_proto=$(grep "proto " "$config_file_modified" | cut -d " " -f 2 | cut -c1-3)
remotes=$(grep "remote " "$config_file_modified" | cut -d " " -f 2-4)
echo " Using:"
echo "${remotes}" | while IFS= read -r line; do
# strip any comments from line that could mess up cuts
clean_line=${line%% #*}
addr=$(echo "$clean_line" | cut -d " " -f 1)
port=$(echo "$clean_line" | cut -d " " -f 2)
proto=$(echo "$clean_line" | cut -d " " -f 3 | cut -c1-3)
if is_ip "$addr"; then
echo " IP: $addr PORT: $port"
iptables -A OUTPUT -o eth0 -d "$addr" -p "${proto:-$remote_proto}" --dport "${port:-$remote_port}" -j ACCEPT
else
# Hostname remote: allow every address it currently resolves to.
for ip in $(dig -4 +short "$addr"); do
echo " $addr (IP: $ip PORT: $port)"
iptables -A OUTPUT -o eth0 -d "$ip" -p "${proto:-$remote_proto}" --dport "${port:-$remote_port}" -j ACCEPT
done
fi
done
echo "Allowing connections over VPN interface..."
iptables -A INPUT -i tun0 -j ACCEPT
iptables -A OUTPUT -o tun0 -j ACCEPT
echo "Preventing anything else..."
# Default-deny policies: everything not explicitly accepted above is dropped.
iptables -P INPUT DROP
iptables -P OUTPUT DROP
iptables -P FORWARD DROP
echo -e "iptables rules created and routes configured.\n"
else
echo -e "WARNING: VPN kill switch is disabled. Traffic will be allowed outside of the tunnel if the connection is lost.\n"
fi
# Optional HTTP proxy: credentials (when both are set) are appended to the
# Tinyproxy config as a BasicAuth line before the wrapper starts it.
if [ "$HTTP_PROXY" = "on" ]; then
if [ "$PROXY_USERNAME" ]; then
if [ "$PROXY_PASSWORD" ]; then
echo "Configuring proxy authentication."
echo -e "\nBasicAuth $PROXY_USERNAME $PROXY_PASSWORD" >> /data/tinyproxy.conf
else
echo "WARNING: Proxy username supplied without password. Starting HTTP proxy without credentials."
fi
fi
/data/scripts/tinyproxy_wrapper.sh &
fi
# Optional SOCKS proxy: credentials are realized as a system user so Dante
# can authenticate with socksmethod: username.
if [ "$SOCKS_PROXY" = "on" ]; then
if [ "$PROXY_USERNAME" ]; then
if [ "$PROXY_PASSWORD" ]; then
echo "Configuring proxy authentication."
adduser -S -D -g "$PROXY_USERNAME" -H -h /dev/null "$PROXY_USERNAME"
echo "$PROXY_USERNAME:$PROXY_PASSWORD" | chpasswd 2> /dev/null
sed -i 's/socksmethod: none/socksmethod: username/' /data/sockd.conf
else
echo "WARNING: Proxy username supplied without password. Starting SOCKS proxy without credentials."
fi
fi
/data/scripts/dante_wrapper.sh &
fi
echo -e "Running OpenVPN client.\n"
# IPv6 options pushed by the server are ignored; only IPv4 rules are
# managed by the kill switch above.
openvpn --config "$config_file_modified" \
--verb "$vpn_log_level" \
--auth-nocache \
--connect-retry-max 10 \
--pull-filter ignore "route-ipv6" \
--pull-filter ignore "ifconfig-ipv6" \
--up-restart \
--cd /data/vpn &
openvpn_child=$!
wait $openvpn_child

View file

@ -0,0 +1,18 @@
#!/bin/ash
# shellcheck shell=ash
# shellcheck disable=SC2169 # making up for lack of ash support
# Wait for network connectivity, then configure and start Tinyproxy.
echo -e "Running Tinyproxy HTTP proxy server.\n"
# Spin until 1.1.1.1 answers pings (i.e. the tunnel can reach the Internet).
until ping -c 3 1.1.1.1 > /dev/null 2>&1; do
sleep 1
done
# Listen on the Docker-facing interface (eth0) and bind outbound requests
# to the tunnel interface (tun0) so proxied traffic egresses via the VPN.
addr_eth=$(ip a show dev eth0 | grep inet | cut -d " " -f 6 | cut -d "/" -f 1)
addr_tun=$(ip a show dev tun0 | grep inet | cut -d " " -f 6 | cut -d "/" -f 1)
sed -i \
-e "/Listen/c Listen $addr_eth" \
-e "/Bind/c Bind $addr_tun" \
/data/tinyproxy.conf
tinyproxy -d -c /data/tinyproxy.conf

65
data/sockd.conf Normal file
View file

@ -0,0 +1,65 @@
# Logging
logoutput: /var/log/sockd.log
errorlog: stderr
# Server address specification
internal: eth0 port = 1080
external: tun0
# Authentication methods
clientmethod: none
socksmethod: none
# Server identities
user.unprivileged: socks
##
## SOCKS client access rules
##
# Rule processing stops at the first match; no match results in blocking
# Block access to socks server from 192.0.2.22
# client block {
# # Block connections from 192.0.2.22/32
# from: 192.0.2.22/24 to: 0.0.0.0/0
# log: error # connect disconnect
# }
# Allow all connections
client pass {
from: 0.0.0.0/0 to: 0.0.0.0/0
log: error connect disconnect
}
##
## SOCKS command rules
##
# Rule processing stops at the first match; no match results in blocking
# Block communication with www.example.org
# socks block {
# from: 0.0.0.0/0 to: www.example.org
# command: bind connect udpassociate
# log: error # connect disconnect iooperation
# }
# Generic pass statement - bind/outgoing traffic
socks pass {
from: 0.0.0.0/0 to: 0.0.0.0/0
command: bind connect udpassociate
log: error connect disconnect # iooperation
}
# Block incoming connections/packets from ftp.example.org
# socks block {
# from: ftp.example.org to: 0.0.0.0/0
# command: bindreply udpreply
# log: error # connect disconnect iooperation
# }
# Generic pass statement for incoming connections/packets
socks pass {
from: 0.0.0.0/0 to: 0.0.0.0/0
command: bindreply udpreply
log: error connect disconnect # iooperation
}

19
data/tinyproxy.conf Normal file
View file

@ -0,0 +1,19 @@
# Tinyproxy configuration. Listen and Bind are intentionally left without
# values here: tinyproxy_wrapper.sh rewrites them at startup (Listen = eth0
# address, Bind = tun0 address) so proxied traffic egresses via the VPN.
User tinyproxy
Group tinyproxy
# HTTP proxy port; publish it on the container to reach it from the host.
Port 8080
Listen
Bind
Timeout 600
DefaultErrorFile "/usr/share/tinyproxy/default.html"
StatFile "/usr/share/tinyproxy/stats.html"
LogFile "/var/log/tinyproxy/tinyproxy.log"
LogLevel Info
MaxClients 100
MinSpareServers 5
MaxSpareServers 15
StartServers 10

View file

@ -1,13 +0,0 @@
services:
openvpn-client:
image: ghcr.io/wfg/openvpn-client:latest
container_name: openvpn-client
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun:/dev/net/tun
environment:
- ALLOWED_SUBNETS=192.168.10.0/24
volumes:
- ./local:/config
restart: unless-stopped