add netbird

This commit is contained in:
2024-05-14 20:47:17 +02:00
parent 000adcd17c
commit 76e493f1bd
6 changed files with 19 additions and 365 deletions

View File

@@ -19,9 +19,19 @@ firewall_allowed_tcp_ports:
- "80"
- "443"
- "9100"
#! Kubernetes control plane ports
- "6443"
- "2379"
- "2380"
- "10250"
- "10259"
- "10257"
# - "9090"
# - "3000"
# - "9323"
#! Kubernetes Worker ports
#* NETBIRD

View File

@@ -1,99 +0,0 @@
# ---
# package_list:
# - name: python3-pip
# sshd_skip_defaults: true
# sshd_config_file: /etc/ssh/sshd_config
# sshd_AuthorizedKeysFile: .ssh/authorized_keys
# sshd_AcceptEnv: "LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT LC_IDENTIFICATION LC_ALL"
# sshd_Protocol: 2
# sshd_LoginGraceTime: 30
# sshd_SyslogFacility: AUTH
# sshd_LogLevel: VERBOSE
# sshd_PermitRootLogin: 'no'
# sshd_StrictModes: 'yes'
# sshd_IgnoreRhosts: 'yes'
# sshd_HostbasedAuthentication: 'no'
# sshd_PasswordAuthentication: 'no'
# sshd_PermitEmptyPasswords: 'no'
# sshd_ChallengeResponseAuthentication: 'no'
# sshd_GSSAPIAuthentication: 'no'
# sshd_X11DisplayOffset: 10
# sshd_PrintMotd: 'yes'
# sshd_PrintLastLog: 'yes'
# sshd_TCPKeepAlive: 'yes'
# sshd_Subsystem: "sftp /usr/lib/openssh/sftp-server"
# sshd_UsePAM: 'yes'
# sshd_UseDNS: 'no'
# sshd_KexAlgorithms: "curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256"
# sshd_Ciphers: "chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr"
# sshd_MACs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com"
# sshd_HostKey:
# - /etc/ssh/ssh_host_rsa_key
# #######
# # APT #
# #######
# apt_repositories_sources:
# - deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal main restricted
# - deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-updates main restricted
# - deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal universe
# - deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-updates universe
# - deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal multiverse
# - deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-updates multiverse
# - deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-backports main restricted universe multiverse
# - deb http://security.ubuntu.com/ubuntu focal-security main restricted
# - deb http://security.ubuntu.com/ubuntu focal-security universe
# - deb http://security.ubuntu.com/ubuntu focal-security multiverse
# ########
# # KUBE #
# ########
# disable_firewall: true
# # Need public_network for argocd
# # I use "any" because the worker and master are not on the same network
# # (they each have only one public IP)
# kubernetes_subnet: 0.0.0.0/0
# # vip control plane 192.168.25.255
# setup_vip: false
# install_nginx_ingress: false
# install_longhorn: false
# # This variable is used when the cluster is bootstrapped for the first time
# kubernetes_init_host: ovh-master
# kubernetes_init_app: true
# kubernetes_app:
# - url: https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# namespace: argocd
# kubernetes_alias_bashrc:
# - path: "/root/.bashrc"
# regexp: "^source /usr/share/bash-completion/bash_completion"
# state: present
# line: "source /usr/share/bash-completion/bash_completion"
# - path: "/root/.bashrc"
# regexp: "^source /etc/bash_completion"
# state: present
# line: "source /etc/bash_completion"
# - path: "/root/.bashrc"
# regexp: "^source <(kubectl completion bash)"
# state: present
# line: "source <(kubectl completion bash)"
# - path: "/root/.bashrc"
# regexp: "^alias k=kubectl"
# state: present
# line: "alias k=kubectl"
# - path: "/root/.bashrc"
# regexp: "^complete -F __start_kubectl k"
# state: present
# line: "complete -F __start_kubectl k"
# - path: "/root/.bashrc"
# regexp: '^alias kname="kubectl config set-context --current --namespace="'
# state: present
# line: '^alias kname="kubectl config set-context --current --namespace="'

View File

@@ -1,250 +0,0 @@
---
##########
# DOCKER #
##########
# Base directories for the dockerapp tree; the last entry is used as the
# root of the volume host paths referenced by dockerapp_compose below.
dockerapp_tree_base_dir:
  - "/opt"
  # - "/opt/entreprise/dockerapps"
# docker_install_compose: false
# bootstrap dockerapp:
dockerapp_service: dockerapps
# Per-application volume directories created under the service directory.
dockerapp_tree_volumes:
  - "gitlab"
  - "gitlab/config"
  - "gitlab/data"
  - "gitlab/docker"
  - "gitlab/logs"
  - "gitlab-runner"
  - "gitlab-runner/certs"
  - "gitlab-runner/docker"
  - "pypi-mirror"
  - "pypi-mirror/etc"
  - "sonarqube"
  - "sonarqube/postgresql"
  - "sonarqube/sonarqube"
  - "sonarqube/sonarqube/data"
  - "sonarqube/sonarqube/extensions"
  - "sonarqube/sonarqube/key"
  - "sonarqube/sonarqube/logs"
  - "traefik/certs"
# generate docker-compose.yml — this mapping is rendered verbatim into the
# compose file for the dockerapp service. Indentation below restores the
# nesting that was lost in this copy (flattened keys would not parse).
dockerapp_compose:
  version: '3.9'
  networks:
    t2_proxy:
      name: t2_proxy
      driver: bridge
      ipam:
        config:
          - subnet: 192.168.90.0/24
    default:
      driver: bridge
    socket_proxy:
      name: socket_proxy
      driver: bridge
      ipam:
        config:
          - subnet: 192.168.91.0/24
  # Shared TZ/UID/GID environment, merged into services via the alias below.
  x-environment: &default-tz-puid-pgid
    TZ: Europe/Paris
    PUID: 1000
    PGID: 1000
  # Proxy Network and Security
  x-network-and-security: &network-and-security
    networks:
      - t2_proxy
    security_opt:
      - no-new-privileges:true
  # Keys common to some of the services in basic-services.txt
  x-common-keys-core: &common-keys-core
    <<: *network-and-security
    restart: always
    # profiles:
    #   - basic
  # Keys common to some of the dependent services/apps
  x-common-keys-apps: &common-keys-apps
    <<: *network-and-security
    restart: unless-stopped
    # profiles:
    #   - apps
  services:
    pypi-mirror:
      image: "pypa/bandersnatch:latest"
      restart: always
      # NOTE(review): dev_net is not declared in the networks map above —
      # confirm it is created externally before relying on this service.
      networks:
        dev_net:
          ipv4_address: 10.10.20.9
      command: python /bandersnatch/src/runner.py 3600
      logging:
        driver: "json-file"
        options:
          max-size: "10m"
          max-file: "10"
      volumes:
        - "{{ dockerapp_tree_base_dir | last }}/{{ dockerapp_service }}/pypi-mirror/etc/bandersnatch.conf:/conf/bandersnatch.conf"
    # Traefik 2 - Reverse Proxy
    # Touch (create empty files) traefik.log and acme/acme.json. Set acme.json permissions to 600.
    # touch $DOCKERDIR/traefik2/acme/acme.json
    # chmod 600 $DOCKERDIR/traefik2/acme/acme.json
    # touch $DOCKERDIR/logs/homeserver/traefik.log # customize this
    #### LETSENCRYPT CHALLENGE ######
    # https://doc.traefik.io/traefik/user-guides/docker-compose/acme-http/
    # Add new https services/fqdn
    # uncomment acme.caserver line and remove appdata/traefik2/acme/letsencrypt/acme.json file
    # Down all containers and up all (docker-compose down/up -d), wait for new cert/key on acme.json
    # At this moment, cert/key are staging, you need to comment acme.caserver line and remove acme.json file then restart traefik
    traefik:
      <<: *common-keys-core # See EXTENSION FIELDS at the top
      container_name: traefik
      image: traefik:latest
      command: # CLI arguments
        - --global.checkNewVersion=true
        - --global.sendAnonymousUsage=true
        - --entryPoints.http.address=:80/tcp
        - --entryPoints.https.address=:443/tcp
        - --entryPoints.wireguard.address=:443/udp
        - --api=true
        - --api.dashboard=true
        - --log=true
        - --log.level=WARN # (Default: error) DEBUG, INFO, WARN, ERROR, FATAL, PANIC
        - --accessLog=true
        - --accessLog.filePath=/traefik.log
        - --accessLog.bufferingSize=100 # Configuring a buffer of 100 lines
        - --providers.docker=true
        - --providers.docker.endpoint=tcp://socket-proxy:2375
        - --providers.docker.exposedByDefault=false
        - --providers.docker.network=t2_proxy
        - --providers.docker.swarmMode=false
        - --providers.file.directory=/rules # Load dynamic configuration from one or more .toml or .yml files in a directory
        - --providers.file.watch=true # Only works on top level files in the rules folder
        - --metrics.prometheus=true
        - --metrics.prometheus.buckets=0.1,0.3,1.2,5.0
        - --metrics.prometheus.addEntryPointsLabels=true
        - --metrics.prometheus.addrouterslabels=true
        - --metrics.prometheus.addServicesLabels=true
        - --metrics.prometheus.manualrouting=true
        - --certificatesresolvers.letsencrypt-resolver.acme.tlschallenge=true
        # - --certificatesresolvers.letsencrypt-resolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory
        - --certificatesresolvers.letsencrypt-resolver.acme.email=stephane.gratiasquiquandon@gmail.com
        - --certificatesresolvers.letsencrypt-resolver.acme.storage=/letsencrypt/acme.json
      networks:
        t2_proxy:
          ipv4_address: 192.168.90.254 # You can specify a static IP
        socket_proxy:
      environment:
        <<: *default-tz-puid-pgid
      ports:
        - target: 80
          published: 80
          protocol: tcp
          mode: host
        - target: 443
          published: 443
          protocol: tcp
          mode: host
        - target: 443
          published: 443
          protocol: udp
          mode: host
      volumes:
        - ./appdata/traefik2/rules/homeserver:/rules # file provider directory
        - ./appdata/traefik2/acme/letsencrypt:/letsencrypt
        # - ./appdata/traefik2/acme/acme.json:/acme.json # cert location - you must touch this file and change permissions to 600
        - ./logs/homeserver/traefik.log:/traefik.log # for fail2ban - make sure to touch file before starting container
        - /etc/timezone:/etc/timezone:ro
        - /etc/localtime:/etc/localtime:ro
      labels:
        - "traefik.enable=true"
        # HTTP-to-HTTPS Redirect
        - "traefik.http.routers.http-catchall.entrypoints=http"
        - "traefik.http.routers.http-catchall.rule=HostRegexp(`{host:.+}`)"
        - "traefik.http.routers.http-catchall.middlewares=redirect-to-https"
        - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
        - "traefik.http.middlewares.redirect-to-https.redirectscheme.permanent=true"
        # HTTP Routers
        - "traefik.http.routers.traefik-rtr.entrypoints=https"
        - "traefik.http.routers.traefik-rtr.rule=Host(`traefik.jingoh.fr`)"
        ## Services - API
        - "traefik.http.routers.traefik-rtr.service=api@internal"
        - "traefik.http.routers.traefik-rtr.tls=true"
        ## MONITORING
        - traefik.http.routers.prometheus.entrypoints=https
        - traefik.http.routers.prometheus.rule=Host(`traefik.jingoh.fr`) && PathPrefix(`/metrics`)
        - traefik.http.routers.prometheus.service=prometheus@internal
        - traefik.http.routers.prometheus.middlewares=traefik-basic
        ## Middlewares
        # echo $(htpasswd -nB user) | sed -e s/\\$/\\$\\$/g
        - "traefik.http.routers.traefik-rtr.middlewares=traefik-basic"
        - "traefik.http.middlewares.traefik-basic.basicauth.users=jingohtraf:$$2y$$05$$JO8mJnOV2PARzEcVj.Grp.H.JbkWYneAIjgMt7c0.5NTyBNDkRIiW"
        # - "traefik.http.middlewares.traefik-rtr-ratelimit.ratelimit.average=10"
        # - "traefik.http.middlewares.traefik-rtr-ratelimit.ratelimit.burst=10"
        # - "traefik.http.middlewares.traefik-rtr-ratelimit.ratelimit.period=1"
        # - "traefik.http.routers.traefik-rtr-ratelimit.middlewares=traefik-rtr-ratelimit@docker"
        ## TLS
        - "traefik.http.routers.traefik-rtr.tls.certresolver=letsencrypt-resolver"
        - "traefik.http.routers.prometheus.tls.certresolver=letsencrypt-resolver"
    # Docker Socket Proxy - Security Enhanced Proxy for Docker Socket
    socket-proxy:
      <<: *common-keys-core # See EXTENSION FIELDS at the top
      container_name: socket-proxy
      image: tecnativa/docker-socket-proxy:latest
      networks:
        socket_proxy:
          ipv4_address: 192.168.91.254 # You can specify a static IP
      volumes:
        - "/var/run/docker.sock:/var/run/docker.sock"
      environment:
        - LOG_LEVEL=info # debug,info,notice,warning,err,crit,alert,emerg
        ## Variables match the URL prefix (i.e. AUTH blocks access to /auth/* parts of the API, etc.).
        # 0 to revoke access.
        # 1 to grant access.
        ## Granted by Default
        - EVENTS=1
        - PING=1
        - VERSION=1
        ## Revoked by Default
        # Security critical
        - AUTH=0
        - SECRETS=0
        - POST=0 # Watchtower
        # Not always needed
        - BUILD=0
        - COMMIT=0
        - CONFIGS=0
        - CONTAINERS=1 # Traefik, portainer, etc.
        - DISTRIBUTION=0
        - EXEC=0
        - IMAGES=0 # Portainer
        - INFO=0 # Portainer
        - NETWORKS=0 # Portainer
        - NODES=0
        - PLUGINS=0
        - SERVICES=0 # Portainer
        - SESSION=0
        - SWARM=0
        - SYSTEM=0
        - TASKS=0 # Portainer
        - VOLUMES=0 # Portainer
##############
# KUBERNETES #
##############
# Base directories created for the kubernetes deployment tree.
kubernetes_tree_base_dir:
  - /opt
  - /opt/kubernetes
# Name of the kubernetes service subdirectory under the tree above.
kubernetes_service: infra

View File

@@ -115,20 +115,6 @@
# tags: test2
tasks:
- name: Update repositories and install py3-pip package
community.general.apk:
name: python3-pip
update_cache: true
delegate_to: localhost
- name: Install pip package
ansible.builtin.pip:
name: "{{ item }}"
loop:
- setuptools
- netaddr
delegate_to: localhost
- name: Tcheck ssh conf for phone connection
ansible.builtin.lineinfile:
path: /etc/ssh/ssh_config

11
hosts
View File

@@ -3,13 +3,20 @@ scale01 ansible_host=163.172.209.36 ansible_user=stephane
ovh01 ansible_host=5.135.181.11 ansible_user=stephane
scaleway ansible_host=163.172.84.28 ansible_user=stephane
[controller]
[tower]
scaleway ansible_host=163.172.84.28 ansible_user=stephane
[kubernetes]
[control]
scale01 ansible_host=163.172.209.36 ansible_user=stephane
ovh01 ansible_host=5.135.181.11 ansible_user=stephane
[worker]
[kubernetes:children]
control
worker
[vagrant]
v1 ansible_host=192.168.121.2 ansible_user=vagrant ansible_ssh_pass=vagrant
v2 ansible_host=192.168.121.240 ansible_user=vagrant ansible_ssh_pass=vagrant