Home | Markdown | Gemini | Microblog

$ git clone https://codeberg.org/snonux/conf.git
$ cd conf
$ git checkout 15a86f3 # Last commit before ArgoCD migration
$ cd f3s/
dnf update -y
reboot
paul@f0:~ % doas freebsd-update fetch
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas reboot
.
.
.
paul@f0:~ % doas freebsd-update -r 14.3-RELEASE upgrade
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas reboot
.
.
.
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas pkg update
paul@f0:~ % doas pkg upgrade
paul@f0:~ % doas reboot
.
.
.
paul@f0:~ % uname -a
FreeBSD f0.lan.buetow.org 14.3-RELEASE FreeBSD 14.3-RELEASE
releng/14.3-n271432-8c9ce319fef7 GENERIC amd64
[root@r0 ~]# echo -n SECRET_TOKEN > ~/.k3s_token
[root@r0 ~]# curl -sfL https://get.k3s.io | K3S_TOKEN=$(cat ~/.k3s_token) \
sh -s - server --cluster-init \
--node-ip=192.168.2.120 \
--advertise-address=192.168.2.120 \
--tls-san=r0.wg0.wan.buetow.org
[INFO] Finding release for channel stable
[INFO] Using v1.32.6+k3s1 as release
.
.
.
[INFO] systemd: Starting k3s
[root@r1 ~]# curl -sfL https://get.k3s.io | K3S_TOKEN=$(cat ~/.k3s_token) \
sh -s - server --server https://r0.wg0.wan.buetow.org:6443 \
--node-ip=192.168.2.121 \
--advertise-address=192.168.2.121 \
--tls-san=r1.wg0.wan.buetow.org
[root@r2 ~]# curl -sfL https://get.k3s.io | K3S_TOKEN=$(cat ~/.k3s_token) \
sh -s - server --server https://r0.wg0.wan.buetow.org:6443 \
--node-ip=192.168.2.122 \
--advertise-address=192.168.2.122 \
--tls-san=r2.wg0.wan.buetow.org
.
.
.
[root@r0 ~]# kubectl get nodes
NAME                STATUS   ROLES                       AGE     VERSION
r0.lan.buetow.org   Ready    control-plane,etcd,master   4m44s   v1.32.6+k3s1
r1.lan.buetow.org   Ready    control-plane,etcd,master   3m13s   v1.32.6+k3s1
r2.lan.buetow.org   Ready    control-plane,etcd,master   30s     v1.32.6+k3s1
[root@r0 ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                      READY   STATUS      RESTARTS   AGE
kube-system   coredns-5688667fd4-fs2jj                  1/1     Running     0          5m27s
kube-system   helm-install-traefik-crd-f9hgd            0/1     Completed   0          5m27s
kube-system   helm-install-traefik-zqqqk                0/1     Completed   2          5m27s
kube-system   local-path-provisioner-774c6665dc-jqlnc   1/1     Running     0          5m27s
kube-system   metrics-server-6f4c6675d5-5xpmp           1/1     Running     0          5m27s
kube-system   svclb-traefik-411cec5b-cdp2l              2/2     Running     0          78s
kube-system   svclb-traefik-411cec5b-f625r              2/2     Running     0          4m58s
kube-system   svclb-traefik-411cec5b-twrd7              2/2     Running     0          4m2s
kube-system   traefik-c98fdf6fb-lt6fx                   1/1     Running     0          4m58s
> ~ kubectl create namespace test
namespace/test created
> ~ kubectl get namespaces
NAME              STATUS   AGE
default           Active   6h11m
kube-node-lease   Active   6h11m
kube-public       Active   6h11m
kube-system       Active   6h11m
test              Active   5s
> ~ kubectl config set-context --current --namespace=test
Context "default" modified.
> ~ cat <<END > apache-deployment.yaml
# Apache HTTP Server Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: apache-deployment
spec:
replicas: 1
selector:
matchLabels:
app: apache
template:
metadata:
labels:
app: apache
spec:
containers:
- name: apache
image: httpd:latest
ports:
# Container port where Apache listens
- containerPort: 80
END
> ~ kubectl apply -f apache-deployment.yaml
deployment.apps/apache-deployment created
> ~ kubectl get all
NAME READY STATUS RESTARTS AGE
pod/apache-deployment-5fd955856f-4pjmf 1/1 Running 0 7s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/apache-deployment 1/1 1 1 7s
NAME DESIRED CURRENT READY AGE
replicaset.apps/apache-deployment-5fd955856f 1 1 1 7s
> ~ cat <<END > apache-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: apache
name: apache-service
spec:
ports:
- name: web
port: 80
protocol: TCP
# Expose port 80 on the service
targetPort: 80
selector:
# Link this service to pods with the label app=apache
app: apache
END
> ~ kubectl apply -f apache-service.yaml
service/apache-service created
> ~ kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
apache-service ClusterIP 10.43.249.165 <none> 80/TCP 4s
> ~ cat <<END > apache-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apache-ingress
namespace: test
annotations:
spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: standby.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: www.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
END
> ~ kubectl apply -f apache-ingress.yaml
ingress.networking.k8s.io/apache-ingress created
> ~ kubectl describe ingress
Name: apache-ingress
Labels: <none>
Namespace: test
Address: 192.168.2.120,192.168.2.121,192.168.2.122
Ingress Class: traefik
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
f3s.foo.zone
/ apache-service:80 (10.42.1.11:80)
standby.f3s.foo.zone
/ apache-service:80 (10.42.1.11:80)
www.f3s.foo.zone
/ apache-service:80 (10.42.1.11:80)
Annotations: spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
Events: <none>
> ~ curl -H "Host: www.f3s.foo.zone" http://r0.lan.buetow.org:80
<html><body><h1>It works!</h1></body></html>
> ~ cat <<END > apache-deployment.yaml
# Apache HTTP Server Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: apache-deployment
namespace: test
spec:
replicas: 2
selector:
matchLabels:
app: apache
template:
metadata:
labels:
app: apache
spec:
containers:
- name: apache
image: httpd:latest
ports:
# Container port where Apache listens
- containerPort: 80
readinessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 15
periodSeconds: 10
volumeMounts:
- name: apache-htdocs
mountPath: /usr/local/apache2/htdocs/
volumes:
- name: apache-htdocs
persistentVolumeClaim:
claimName: example-apache-pvc
END
> ~ cat <<END > apache-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apache-ingress
namespace: test
annotations:
spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: standby.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: www.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
END
> ~ cat <<END > apache-persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: example-apache-pv
spec:
capacity:
storage: 1Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /data/nfs/k3svolumes/example-apache-volume-claim
type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: example-apache-pvc
namespace: test
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
END
> ~ cat <<END > apache-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: apache
name: apache-service
namespace: test
spec:
ports:
- name: web
port: 80
protocol: TCP
# Expose port 80 on the service
targetPort: 80
selector:
# Link this service to pods with the label app=apache
app: apache
END
> ~ kubectl apply -f apache-persistent-volume.yaml
> ~ kubectl apply -f apache-service.yaml
> ~ kubectl apply -f apache-deployment.yaml
> ~ kubectl apply -f apache-ingress.yaml
> ~ kubectl get pods
NAME READY STATUS RESTARTS AGE
apache-deployment-5b96bd6b6b-fv2jx 0/1 ContainerCreating 0 9m15s
apache-deployment-5b96bd6b6b-ax2ji 0/1 ContainerCreating 0 9m15s
> ~ kubectl describe pod apache-deployment-5b96bd6b6b-fv2jx | tail -n 5
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 9m34s default-scheduler Successfully
assigned test/apache-deployment-5b96bd6b6b-fv2jx to r2.lan.buetow.org
Warning FailedMount 80s (x12 over 9m34s) kubelet MountVolume.SetUp
failed for volume "example-apache-pv" : hostPath type check failed:
/data/nfs/k3svolumes/example-apache-volume-claim is not a directory
[root@r0 ~]# mkdir /data/nfs/k3svolumes/example-apache-volume-claim/
[root@r0 ~]# cat <<END > /data/nfs/k3svolumes/example-apache-volume-claim/index.html
<!DOCTYPE html>
<html>
<head>
<title>Hello, it works</title>
</head>
<body>
<h1>Hello, it works!</h1>
<p>This site is served via a PVC!</p>
</body>
</html>
END
> ~ kubectl delete pod apache-deployment-5b96bd6b6b-fv2jx
> ~ curl -H "Host: www.f3s.foo.zone" http://r0.lan.buetow.org:80
<!DOCTYPE html>
<html>
<head>
<title>Hello, it works</title>
</head>
<body>
<h1>Hello, it works!</h1>
<p>This site is served via a PVC!</p>
</body>
</html>
> ~ kubectl -n kube-system scale deployment traefik --replicas=2
> ~ kubectl -n kube-system get pods -l app.kubernetes.io/name=traefik
kube-system   traefik-c98fdf6fb-97kqk   1/1   Running   19 (53d ago)   64d
kube-system   traefik-c98fdf6fb-9npg2   1/1   Running   11 (53d ago)   61d
> ~ curl https://f3s.foo.zone
<html><body><h1>It works!</h1></body></html>
> ~ curl https://www.f3s.foo.zone
<html><body><h1>It works!</h1></body></html>
> ~ curl https://standby.f3s.foo.zone
<html><body><h1>It works!</h1></body></html>
table <f3s> {
192.168.2.120
192.168.2.121
192.168.2.122
}
http protocol "https" {
# TLS certificates for all f3s services
tls keypair f3s.foo.zone
tls keypair www.f3s.foo.zone
tls keypair standby.f3s.foo.zone
tls keypair anki.f3s.foo.zone
tls keypair www.anki.f3s.foo.zone
tls keypair standby.anki.f3s.foo.zone
tls keypair bag.f3s.foo.zone
tls keypair www.bag.f3s.foo.zone
tls keypair standby.bag.f3s.foo.zone
tls keypair flux.f3s.foo.zone
tls keypair www.flux.f3s.foo.zone
tls keypair standby.flux.f3s.foo.zone
tls keypair audiobookshelf.f3s.foo.zone
tls keypair www.audiobookshelf.f3s.foo.zone
tls keypair standby.audiobookshelf.f3s.foo.zone
tls keypair gpodder.f3s.foo.zone
tls keypair www.gpodder.f3s.foo.zone
tls keypair standby.gpodder.f3s.foo.zone
tls keypair radicale.f3s.foo.zone
tls keypair www.radicale.f3s.foo.zone
tls keypair standby.radicale.f3s.foo.zone
tls keypair vault.f3s.foo.zone
tls keypair www.vault.f3s.foo.zone
tls keypair standby.vault.f3s.foo.zone
tls keypair syncthing.f3s.foo.zone
tls keypair www.syncthing.f3s.foo.zone
tls keypair standby.syncthing.f3s.foo.zone
tls keypair uprecords.f3s.foo.zone
tls keypair www.uprecords.f3s.foo.zone
tls keypair standby.uprecords.f3s.foo.zone
# Explicitly route non-f3s hosts to localhost
match request header "Host" value "foo.zone" forward to <localhost>
match request header "Host" value "www.foo.zone" forward to <localhost>
match request header "Host" value "dtail.dev" forward to <localhost>
# ... other non-f3s hosts ...
# NOTE: f3s hosts have NO match rules here!
# They use relay-level failover (f3s -> localhost backup)
# See the relay configuration below for automatic failover details
}
relay "https4" {
listen on 46.23.94.99 port 443 tls
protocol "https"
# Primary: f3s cluster (with health checks) - Falls back to localhost when all hosts down
forward to <f3s> port 80 check tcp
forward to <localhost> port 8080
}
relay "https6" {
listen on 2a03:6000:6f67:624::99 port 443 tls
protocol "https"
# Primary: f3s cluster (with health checks) - Falls back to localhost when all hosts down
forward to <f3s> port 80 check tcp
forward to <localhost> port 8080
}
# NEW configuration - supports automatic failover
http protocol "https" {
# Explicitly route non-f3s hosts to localhost
match request header "Host" value "foo.zone" forward to <localhost>
match request header "Host" value "dtail.dev" forward to <localhost>
# ... other non-f3s hosts ...
# f3s hosts have NO protocol rules - they use relay-level failover
# (no match rules for f3s.foo.zone, anki.f3s.foo.zone, etc.)
}
relay "https4" {
# f3s FIRST (with health checks), localhost as BACKUP
forward to <f3s> port 80 check tcp
forward to <localhost> port 8080
}
# OpenBSD httpd.conf
# Fallback for f3s hosts - serve fallback page for ALL paths
server "f3s.foo.zone" {
listen on * port 8080
log style forwarded
location * {
# Rewrite all requests to /index.html to show fallback page regardless of path
request rewrite "/index.html"
root "/htdocs/f3s_fallback"
}
}
server "anki.f3s.foo.zone" {
listen on * port 8080
log style forwarded
location * {
request rewrite "/index.html"
root "/htdocs/f3s_fallback"
}
}
# ... similar blocks for all f3s hostnames ...
<!DOCTYPE html>
<html>
<head>
<title>Server turned off</title>
<style>
body {
font-family: sans-serif;
text-align: center;
padding-top: 50px;
}
.container {
max-width: 600px;
margin: 0 auto;
}
</style>
</head>
<body>
<div class="container">
<h1>Server turned off</h1>
<p>The servers are all currently turned off.</p>
<p>Please try again later.</p>
<p>Or email <a href="mailto:paul@nospam.buetow.org">paul@nospam.buetow.org</a>
- so I can turn them back on for you!</p>
</div>
</body>
</html>
Internet → OpenBSD relayd (TLS termination, Let's Encrypt)
→ WireGuard tunnel
→ k3s Traefik :80 (HTTP)
→ Service
LAN → FreeBSD CARP VIP (192.168.1.138)
→ FreeBSD relayd (TCP forwarding)
→ k3s Traefik :443 (TLS termination, cert-manager)
→ Service
$ cd conf/f3s/cert-manager
$ just install
kubectl apply -f cert-manager.yaml
# ... cert-manager CRDs and resources created ...
kubectl apply -f self-signed-issuer.yaml
clusterissuer.cert-manager.io/selfsigned-issuer created
clusterissuer.cert-manager.io/selfsigned-ca-issuer created
kubectl apply -f ca-certificate.yaml
certificate.cert-manager.io/selfsigned-ca created
kubectl apply -f wildcard-certificate.yaml
certificate.cert-manager.io/f3s-lan-wildcard created
$ kubectl get certificate -n cert-manager
NAME               READY   SECRET                 AGE
f3s-lan-wildcard   True    f3s-lan-tls            5m
selfsigned-ca      True    selfsigned-ca-secret   5m
$ kubectl get secret f3s-lan-tls -n cert-manager -o yaml | \
sed 's/namespace: cert-manager/namespace: services/' | \
kubectl apply -f -
paul@f0:~ % doas pkg install -y relayd
# k3s nodes backend table
table <k3s_nodes> { 192.168.1.120 192.168.1.121 192.168.1.122 }
# TCP forwarding to Traefik (no TLS termination)
relay "lan_http" {
listen on 192.168.1.138 port 80
forward to <k3s_nodes> port 80 check tcp
}
relay "lan_https" {
listen on 192.168.1.138 port 443
forward to <k3s_nodes> port 443 check tcp
}
# Basic PF rules for relayd
set skip on lo0
pass in quick
pass out quick
paul@f0:~ % doas sysrc pf_enable=YES pflog_enable=YES relayd_enable=YES
paul@f0:~ % doas service pf start
paul@f0:~ % doas service pflog start
paul@f0:~ % doas service relayd start
paul@f0:~ % doas sockstat -4 -l | grep 192.168.1.138
_relayd  relayd  2903  11  tcp4  192.168.1.138:80   *:*
_relayd  relayd  2903  12  tcp4  192.168.1.138:443  *:*
---
# LAN Ingress for f3s.lan.foo.zone
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-lan
namespace: services
annotations:
spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
spec:
tls:
- hosts:
- f3s.lan.foo.zone
secretName: f3s-lan-tls
rules:
- host: f3s.lan.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: service
port:
number: 4533
$ kubectl apply -f ingress-lan.yaml
ingress.networking.k8s.io/ingress-lan created
$ curl -k https://f3s.lan.foo.zone
HTTP/2 302
location: /app/
$ sudo tee -a /etc/hosts << 'EOF'
# f3s LAN services
192.168.1.138 f3s.lan.foo.zone
EOF
$ kubectl get secret selfsigned-ca-secret -n cert-manager -o jsonpath='{.data.ca\.crt}' | \
base64 -d > f3s-lan-ca.crt
$ sudo cp f3s-lan-ca.crt /etc/pki/ca-trust/source/anchors/
$ sudo update-ca-trust
[root@r0 ~]# mkdir -p /data/nfs/k3svolumes/registry
$ git clone https://codeberg.org/snonux/conf.git
$ cd conf/f3s/examples/conf/f3s/registry
$ helm upgrade --install registry ./helm-chart --namespace infra --create-namespace
$ kubectl get pods --namespace infra
NAME                               READY   STATUS    RESTARTS      AGE
docker-registry-6bc9bb46bb-6grkr   1/1     Running   6 (53d ago)   54d
$ kubectl get svc docker-registry-service -n infra
NAME                      TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
docker-registry-service   NodePort   10.43.141.56   <none>        5000:30001/TCP   54d
$ cat <<"EOF" | sudo tee /etc/docker/daemon.json >/dev/null
{
"insecure-registries": [
"r0.lan.buetow.org:30001",
"r1.lan.buetow.org:30001",
"r2.lan.buetow.org:30001"
]
}
EOF
$ sudo systemctl restart docker
$ for node in r0 r1 r2; do
> ssh root@$node "echo '127.0.0.1 registry.lan.buetow.org' >> /etc/hosts"
> done
$ for node in r0 r1 r2; do
> ssh root@$node "cat <<'EOF' > /etc/rancher/k3s/registries.yaml
mirrors:
"registry.lan.buetow.org:30001":
endpoint:
- "http://localhost:30001"
EOF
systemctl restart k3s"
> done
$ docker tag my-app:latest r0.lan.buetow.org:30001/my-app:latest
$ docker push r0.lan.buetow.org:30001/my-app:latest
image: docker-registry-service:5000/my-app:latest
$ kubectl run registry-test \
>   --image=docker-registry-service:5000/my-app:latest \
>   --restart=Never -n test --command -- sleep 300
$ cd conf/f3s/examples/conf/f3s/anki-sync-server/docker-image
$ docker build -t anki-sync-server:25.07.5b --build-arg ANKI_VERSION=25.07.5 .
$ docker tag anki-sync-server:25.07.5b \
r0.lan.buetow.org:30001/anki-sync-server:25.07.5b
$ docker push r0.lan.buetow.org:30001/anki-sync-server:25.07.5b
$ ssh root@r0 "mkdir -p /data/nfs/k3svolumes/anki-sync-server/anki_data"
$ kubectl create namespace services
$ kubectl create secret generic anki-sync-server-secret \
--from-literal=SYNC_USER1='paul:SECRETPASSWORD' \
-n services
$ cd ../helm-chart
$ helm upgrade --install anki-sync-server . -n services
containers:
- name: anki-sync-server
  image: registry.lan.buetow.org:30001/anki-sync-server:25.07.5b
volumeMounts:
- name: anki-data
mountPath: /anki_data
$ kubectl get pods -n services
$ kubectl get ingress anki-sync-server-ingress -n services
$ curl https://anki.f3s.foo.zone/health
> ~ kubectl exec -n services deploy/miniflux-postgres -- id postgres
uid=999(postgres) gid=999(postgres) groups=999(postgres)
[root@r0 ~]# id postgres
uid=999(postgres) gid=999(postgres) groups=999(postgres)
paul@f0:~ % doas id postgres
uid=999(postgres) gid=999(postgres) groups=999(postgres)
[root@r0 ~]# groupadd --gid 999 postgres
[root@r0 ~]# useradd --uid 999 --gid 999 \
--home-dir /var/lib/pgsql \
--shell /sbin/nologin postgres
paul@f0:~ % doas pw groupadd postgres -g 999
paul@f0:~ % doas pw useradd postgres -u 999 -g postgres \
-d /var/db/postgres -s /usr/sbin/nologin
# Persistent volume lives on the NFS export
hostPath:
path: /data/nfs/k3svolumes/miniflux/data
type: Directory
...
containers:
- name: miniflux-postgres
image: postgres:17
volumeMounts:
- name: miniflux-postgres-data
mountPath: /var/lib/postgresql/data
$ cd examples/conf/f3s/miniflux/helm-chart
$ mkdir -p /data/nfs/k3svolumes/miniflux/data
$ kubectl create secret generic miniflux-db-password \
--from-literal=fluxdb_password='YOUR_PASSWORD' -n services
$ kubectl create secret generic miniflux-admin-password \
--from-literal=admin_password='YOUR_ADMIN_PASSWORD' -n services
$ helm upgrade --install miniflux . -n services --create-namespace
$ kubectl get all --namespace=services | grep mini
pod/miniflux-postgres-556444cb8d-xvv2p   1/1   Running   0   54d
pod/miniflux-server-85d7c64664-stmt9     1/1   Running   0   54d
service/miniflux            ClusterIP   10.43.47.80    <none>   8080/TCP   54d
service/miniflux-postgres   ClusterIP   10.43.139.50   <none>   5432/TCP   54d
deployment.apps/miniflux-postgres   1/1   1   1   54d
deployment.apps/miniflux-server     1/1   1   1   54d
replicaset.apps/miniflux-postgres-556444cb8d   1   1   1   54d
replicaset.apps/miniflux-server-85d7c64664     1   1   1   54d