First real commit after test

This commit is contained in:
2023-09-23 18:16:40 +02:00
parent 134f895212
commit ccabfe9b03
14 changed files with 637 additions and 0 deletions

2
README copy.md Normal file
View File

@@ -0,0 +1,2 @@
# ansible
Personal Ansible playbooks and roles

43
all.yml Normal file
View File

@@ -0,0 +1,43 @@
---
# Common-services playbook: base user/ssh/apt/fail2ban/rsync setup for all hosts.
- name: Main playbook for common services
  hosts: all
  become: true
  # @author Stéphane Gratias (2023).
  pre_tasks:
    - name: >-
        Show target servers -> Hostname : OS - IP
      ansible.builtin.debug:
        msg: "{{ ansible_hostname }} : {{ ansible_distribution }} {{ ansible_distribution_version }} - {{ ansible_default_ipv4.address }}"
      tags:
        - always
        - test
    # apt_repositories_sources is defined per host in host_vars/.
    - name: Ensure specified repository packages are into sources list
      ansible.builtin.apt_repository:
        repo: "{{ item }}"
        state: present
      loop: "{{ apt_repositories_sources }}"
      tags:
        - repo
  roles:
    - { role: grog.management-user, tags: user }
    - { role: willshersystems.sshd, tags: ssh }
    - { role: claranet.apt, tags: apt }
    - { role: robertdebock.fail2ban, tags: ban }
    - { role: ome.rsync_server, tags: rsync }
  # tasks:
  #   - name: Show ssh backup
  #     debug:
  #       msg: "{{ ssh_backup }}"
  #     tags:
  #       - always
  #       - test

12
ansible.cfg Normal file
View File

@@ -0,0 +1,12 @@
[defaults]
# Look for roles in the repo first, then the system-wide locations.
roles_path=./roles:/usr/share/ansible/roles:/etc/ansible/roles
# NOTE(review): callback_whitelist is deprecated in recent ansible-core in
# favour of callbacks_enabled -- confirm the target Ansible version.
callback_whitelist = timer, profile_tasks, log_plays
#log_path= /var/log/ansible/ansible-base.log
display_args_to_stdout= true
load_callback_plugins= yes
#to keep display output, comment stdout_callback
#stdout_callback= log_plays
; [ssh_connection]
; ssh_args=-o ForwardAgent=yes

79
docker.yml Normal file
View File

@@ -0,0 +1,79 @@
---
# Docker playbook: installs docker + pip, deploys a docker-compose@<app>
# systemd unit, and prepares the per-app directory/volume tree.
- name: Main playbook
  hosts: all
  become: true
  # @author Stéphane Gratias (2021).
  roles:
    - { role: geerlingguy.pip, tags: [docker, pip] }
    - { role: geerlingguy.docker, tags: [docker] }
    # manage docker-compose@dev systemd unit file
    - role: tumf.systemd-service
      tags: docker-compose
      when: ansible_service_mgr == 'systemd'
      vars:
        # do not restart service via systemd
        ansible_unit_test: true
        systemd_service_name: "docker-compose@lab"
        # [Unit]
        systemd_service_Unit_Description: "%i service with docker compose"
        systemd_service_Service_Type: "simple"
        systemd_service_Unit_After: ["docker.service"]
        systemd_service_Unit_Requires: ["docker.service"]
        # [Service]
        systemd_service_Service_WorkingDirectory: "{{ dockerapp_tree_base_dir | last }}/{{ dockerapp_service }}"
        # Remove old containers, images and volumes before each start.
        # docker-compose lives in /usr/local/bin when installed via
        # geerlingguy.docker's compose option, /usr/bin otherwise.
        systemd_service_Service_ExecStartPre:
          - "{{ '/usr/local/bin' if docker_install_compose else '/usr/bin' }}/docker-compose down -v"
          - "{{ '/usr/local/bin' if docker_install_compose else '/usr/bin' }}/docker-compose rm -fv"
        # Compose up
        systemd_service_Service_ExecStart: "{{ '/usr/local/bin' if docker_install_compose else '/usr/bin' }}/docker-compose up"
        # Compose down, remove containers and volumes
        systemd_service_Service_ExecStop: "{{ '/usr/local/bin' if docker_install_compose else '/usr/bin' }}/docker-compose down -v"
        systemd_service_Service_Restart: "always"
        # [Install]
        systemd_service_Install_WantedBy: "multi-user.target"
  tasks:
    - name: create docker app base dir
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        mode: "0755"  # quoted: a bare 0755 is parsed as octal int 493
        owner: root
        group: root
      with_items:
        - "{{ dockerapp_tree_base_dir | last }}"
        - "{{ dockerapp_tree_base_dir | last }}/{{ dockerapp_service }}"
        - "{{ dockerapp_tree_base_dir | last }}/{{ dockerapp_service }}/logs"
      tags:
        - docker-compose
        - bootstrap_dockerapp_create_base_dir
    - name: create docker volumes tree for containers
      ansible.builtin.file:
        path: "{{ dockerapp_tree_base_dir | last }}/{{ dockerapp_service }}/{{ item | default('') }}"
        state: directory
        mode: "0755"
      with_items: "{{ dockerapp_tree_volumes | default([]) }}"
      tags:
        - docker-compose
        - bootstrap_dockerapp_create_app_dir
    - name: create the main docker-compose file (docker-compose.yml)
      ansible.builtin.template:
        src: "../templates/docker-compose.yml.j2"
        dest: "{{ dockerapp_tree_base_dir | last }}/{{ dockerapp_service }}/docker-compose.yml"
        mode: "0600"  # compose file may contain secrets; keep it root-only
      tags:
        - docker-compose
        - bootstrap_dockerapp_configure_docker_compose

241
group_vars/local.yml Normal file
View File

@@ -0,0 +1,241 @@
---
##########
# DOCKER #
##########
dockerapp_tree_base_dir:
  - "/opt"
  # - "/opt/entreprise/dockerapps"
# docker_install_compose: false
# bootstrap dockerapp:
dockerapp_service: dockerapps
dockerapp_tree_volumes:
  - "gitlab"
  - "gitlab/config"
  - "gitlab/data"
  - "gitlab/docker"
  - "gitlab/logs"
  - "gitlab-runner"
  - "gitlab-runner/certs"
  - "gitlab-runner/docker"
  - "pypi-mirror"
  - "pypi-mirror/etc"
  - "sonarqube"
  - "sonarqube/postgresql"
  - "sonarqube/sonarqube"
  - "sonarqube/sonarqube/data"
  - "sonarqube/sonarqube/extensions"
  - "sonarqube/sonarqube/key"
  - "sonarqube/sonarqube/logs"
  - "traefik/certs"
# generate docker-compose.yml (rendered through templates/docker-compose.yml.j2)
dockerapp_compose:
  version: '3.9'
  networks:
    t2_proxy:
      name: t2_proxy
      driver: bridge
      ipam:
        config:
          - subnet: 192.168.90.0/24
    default:
      driver: bridge
    socket_proxy:
      name: socket_proxy
      driver: bridge
      ipam:
        config:
          - subnet: 192.168.91.0/24
  x-environment: &default-tz-puid-pgid
    TZ: Europe/Paris
    PUID: 1000
    PGID: 1000
  # Proxy Network and Security
  x-network-and-security: &network-and-security
    networks:
      - t2_proxy
    security_opt:
      - no-new-privileges:true
  # Keys common to some of the services in basic-services.txt
  x-common-keys-core: &common-keys-core
    <<: *network-and-security
    restart: always
    # profiles:
    #   - basic
  # Keys common to some of the dependent services/apps
  x-common-keys-apps: &common-keys-apps
    <<: *network-and-security
    restart: unless-stopped
    # profiles:
    #   - apps
  services:
    pypi-mirror:
      image: "pypa/bandersnatch:latest"
      restart: always
      # NOTE(review): "dev_net" is not declared in the networks section above
      # (only t2_proxy, default and socket_proxy) -- docker-compose rejects an
      # undefined network. Confirm the intended network name and subnet.
      networks:
        dev_net:
          ipv4_address: 10.10.20.9
      command: python /bandersnatch/src/runner.py 3600
      logging:
        driver: "json-file"
        options:
          max-size: "10m"
          max-file: "10"
      volumes:
        - "{{ dockerapp_tree_base_dir | last }}/{{ dockerapp_service }}/pypi-mirror/etc/bandersnatch.conf:/conf/bandersnatch.conf"
    # Traefik 2 - Reverse Proxy
    # Touch (create empty files) traefik.log and acme/acme.json. Set acme.json permissions to 600.
    # touch $DOCKERDIR/traefik2/acme/acme.json
    # chmod 600 $DOCKERDIR/traefik2/acme/acme.json
    # touch $DOCKERDIR/logs/homeserver/traefik.log # customize this
    #### LETSENCRYPT CHALLENGE ######
    # https://doc.traefik.io/traefik/user-guides/docker-compose/acme-http/
    # Add new https services/fqdn
    # uncomment acme.caserver line and remove appdata/traefik2/acme/letsencrypt/acme.json file
    # Down all containers and up all (docker-compose down/up -d), wait for news cert/key on acme.json
    # At this moment, cert/key are staging, you need to comment acme.caserver line and remove acme.json file then restart traefik
    traefik:
      <<: *common-keys-core  # See EXTENSION FIELDS at the top
      container_name: traefik
      image: traefik:latest
      command: # CLI arguments
        - --global.checkNewVersion=true
        - --global.sendAnonymousUsage=true
        - --entryPoints.http.address=:80/tcp
        - --entryPoints.https.address=:443/tcp
        - --entryPoints.wireguard.address=:443/udp
        - --api=true
        - --api.dashboard=true
        - --log=true
        - --log.level=WARN # (Default: error) DEBUG, INFO, WARN, ERROR, FATAL, PANIC
        - --accessLog=true
        - --accessLog.filePath=/traefik.log
        - --accessLog.bufferingSize=100 # Configuring a buffer of 100 lines
        - --providers.docker=true
        - --providers.docker.endpoint=tcp://socket-proxy:2375
        - --providers.docker.exposedByDefault=false
        - --providers.docker.network=t2_proxy
        - --providers.docker.swarmMode=false
        - --providers.file.directory=/rules # Load dynamic configuration from one or more .toml or .yml files in a directory
        - --providers.file.watch=true # Only works on top level files in the rules folder
        - --metrics.prometheus=true
        - --metrics.prometheus.buckets=0.1,0.3,1.2,5.0
        - --metrics.prometheus.addEntryPointsLabels=true
        - --metrics.prometheus.addrouterslabels=true
        - --metrics.prometheus.addServicesLabels=true
        - --metrics.prometheus.manualrouting=true
        - --certificatesresolvers.letsencrypt-resolver.acme.tlschallenge=true
        # - --certificatesresolvers.letsencrypt-resolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory
        - --certificatesresolvers.letsencrypt-resolver.acme.email=stephane.gratiasquiquandon@gmail.com
        - --certificatesresolvers.letsencrypt-resolver.acme.storage=/letsencrypt/acme.json
      networks:
        t2_proxy:
          ipv4_address: 192.168.90.254 # You can specify a static IP
        socket_proxy:
      environment:
        <<: *default-tz-puid-pgid
      ports:
        - target: 80
          published: 80
          protocol: tcp
          mode: host
        - target: 443
          published: 443
          protocol: tcp
          mode: host
        - target: 443
          published: 443
          protocol: udp
          mode: host
      volumes:
        - ./appdata/traefik2/rules/homeserver:/rules # file provider directory
        - ./appdata/traefik2/acme/letsencrypt:/letsencrypt
        # - ./appdata/traefik2/acme/acme.json:/acme.json # cert location - you must touch this file and change permissions to 600
        - ./logs/homeserver/traefik.log:/traefik.log # for fail2ban - make sure to touch file before starting container
        - /etc/timezone:/etc/timezone:ro
        - /etc/localtime:/etc/localtime:ro
      labels:
        - "traefik.enable=true"
        # HTTP-to-HTTPS Redirect
        - "traefik.http.routers.http-catchall.entrypoints=http"
        - "traefik.http.routers.http-catchall.rule=HostRegexp(`{host:.+}`)"
        - "traefik.http.routers.http-catchall.middlewares=redirect-to-https"
        - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
        - "traefik.http.middlewares.redirect-to-https.redirectscheme.permanent=true"
        # HTTP Routers
        - "traefik.http.routers.traefik-rtr.entrypoints=https"
        - "traefik.http.routers.traefik-rtr.rule=Host(`traefik.jingoh.fr`)"
        ## Services - API
        - "traefik.http.routers.traefik-rtr.service=api@internal"
        - "traefik.http.routers.traefik-rtr.tls=true"
        ## MONITORING
        - traefik.http.routers.prometheus.entrypoints=https
        - traefik.http.routers.prometheus.rule=Host(`traefik.jingoh.fr`) && PathPrefix(`/metrics`)
        - traefik.http.routers.prometheus.service=prometheus@internal
        - traefik.http.routers.prometheus.middlewares=traefik-basic
        ## Middlewares
        # echo $(htpasswd -nB user) | sed -e s/\\$/\\$\\$/g
        - "traefik.http.routers.traefik-rtr.middlewares=traefik-basic"
        - "traefik.http.middlewares.traefik-basic.basicauth.users=jingohtraf:$$2y$$05$$JO8mJnOV2PARzEcVj.Grp.H.JbkWYneAIjgMt7c0.5NTyBNDkRIiW"
        # - "traefik.http.middlewares.traefik-rtr-ratelimit.ratelimit.average=10"
        # - "traefik.http.middlewares.traefik-rtr-ratelimit.ratelimit.burst=10"
        # - "traefik.http.middlewares.traefik-rtr-ratelimit.ratelimit.period=1"
        # - "traefik.http.routers.traefik-rtr-ratelimit.middlewares=traefik-rtr-ratelimit@docker"
        ## TLS
        - "traefik.http.routers.traefik-rtr.tls.certresolver=letsencrypt-resolver"
        - "traefik.http.routers.prometheus.tls.certresolver=letsencrypt-resolver"
    # Docker Socket Proxy - Security Enchanced Proxy for Docker Socket
    socket-proxy:
      <<: *common-keys-core  # See EXTENSION FIELDS at the top
      container_name: socket-proxy
      image: tecnativa/docker-socket-proxy:latest
      networks:
        socket_proxy:
          ipv4_address: 192.168.91.254 # You can specify a static IP
      volumes:
        - "/var/run/docker.sock:/var/run/docker.sock"
      environment:
        - LOG_LEVEL=info # debug,info,notice,warning,err,crit,alert,emerg
        ## Variables match the URL prefix (i.e. AUTH blocks access to /auth/* parts of the API, etc.).
        # 0 to revoke access.
        # 1 to grant access.
        ## Granted by Default
        - EVENTS=1
        - PING=1
        - VERSION=1
        ## Revoked by Default
        # Security critical
        - AUTH=0
        - SECRETS=0
        - POST=0 # Watchtower
        # Not always needed
        - BUILD=0
        - COMMIT=0
        - CONFIGS=0
        - CONTAINERS=1 # Traefik, portainer, etc.
        - DISTRIBUTION=0
        - EXEC=0
        - IMAGES=0 # Portainer
        - INFO=0 # Portainer
        - NETWORKS=0 # Portainer
        - NODES=0
        - PLUGINS=0
        - SERVICES=0 # Portainer
        - SESSION=0
        - SWARM=0
        - SYSTEM=0
        - TASKS=0 # Portainer
        - VOLUMES=0 # Portainer
##############
# KUBERNETES #
##############

99
group_vars/perso.yml Normal file
View File

@@ -0,0 +1,99 @@
---
########
# USER #
########
# Users managed by grog.management-user (wraps GROG.user / authorized-key / sudo).
management_user_list:
  - name: stephane
    shell: '/bin/bash'
    authorized_keys:
      - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClVS1uxDfwS6OusQ4qgcZ6hBc8YRBE8MyXu0sUfGN7S3itjI3W2ixD18v80el8dVQVR12jCY0ueavgoV1cHrfGWkFoLKi+QrA4MuSNUChj0NBbyLTmdwPvne8LRv3ttCbRSJ/6bIEveX8y/7kGn/R1NDFlfE6b5R8ersBUKCQM6YxblAkv/XH8cJlQXhr1nLhVOl/ae+Q/pTCbgioB8qrmGEuMvOLmavcFf7IJbJcSgeiXSOnyIRl2n64X6lbRK+MRZ61pF6vAOXA+Ixyt/fAbO7sjqU0+cEhU5Br5/VcqG4Bc5nhWimtXIHPry3aLV5PtN6K9/i3eA5F6Jpa82JzmUMEbWSBIga02yIw9GjRyAI6ccH/kJGuB6QN5/YwGHpOF2f0FGiEAbUz41mLngN3SsXL1pdV2hT3x56/GIcGe6p/f1cytwVCyOaE7W87B05w5JYb1sSFj6QuGW0rHWfnHT5SY87Mk/H8VgZPaPbm+hSjLIQRAmUYQR+Rub1o9bXE= stephane"
        exclusive: true  # canonical boolean instead of YAML-1.1 "yes"
    sudo:
      hosts: ALL
      as: ALL
      commands: ALL
      nopasswd: ALL
  - name: staffadmin
    shell: '/bin/bash'
    authorized_keys:
      - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClVS1uxDfwS6OusQ4qgcZ6hBc8YRBE8MyXu0sUfGN7S3itjI3W2ixD18v80el8dVQVR12jCY0ueavgoV1cHrfGWkFoLKi+QrA4MuSNUChj0NBbyLTmdwPvne8LRv3ttCbRSJ/6bIEveX8y/7kGn/R1NDFlfE6b5R8ersBUKCQM6YxblAkv/XH8cJlQXhr1nLhVOl/ae+Q/pTCbgioB8qrmGEuMvOLmavcFf7IJbJcSgeiXSOnyIRl2n64X6lbRK+MRZ61pF6vAOXA+Ixyt/fAbO7sjqU0+cEhU5Br5/VcqG4Bc5nhWimtXIHPry3aLV5PtN6K9/i3eA5F6Jpa82JzmUMEbWSBIga02yIw9GjRyAI6ccH/kJGuB6QN5/YwGHpOF2f0FGiEAbUz41mLngN3SsXL1pdV2hT3x56/GIcGe6p/f1cytwVCyOaE7W87B05w5JYb1sSFj6QuGW0rHWfnHT5SY87Mk/H8VgZPaPbm+hSjLIQRAmUYQR+Rub1o9bXE= stephane"
        exclusive: true
    sudo:
      hosts: ALL
      as: ALL
      commands: ALL
      nopasswd: ALL
################
# SSH - CLIENT #
################
# ssh_drop_in_name: null
# #ssh_user: root
# ssh:
#   # noqa var-naming
#   Compression: true
#   GSSAPIAuthentication: false
#   # wokeignore:rule=master
#   ControlMaster: auto
#   ControlPath: ~/.ssh/.cm%C
#   Match:
#     - Condition: "final all"
#       GSSAPIAuthentication: true
#   Host:
#     - Condition: example
#       Hostname: example.com
#       User: somebody
# ssh_ForwardX11: false
#################
# SSH - SERVEUR #
#################
# willshersystems.sshd hardening: key-only auth, no root login, modern crypto.
sshd_skip_defaults: true
sshd_config_file: /etc/ssh/sshd_config
sshd_AuthorizedKeysFile: .ssh/authorized_keys
sshd_AcceptEnv: "LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT LC_IDENTIFICATION LC_ALL"
sshd_Protocol: 2
sshd_LoginGraceTime: 30
sshd_SyslogFacility: AUTH
sshd_LogLevel: VERBOSE
sshd_PermitRootLogin: 'no'
sshd_StrictModes: 'yes'
sshd_IgnoreRhosts: 'yes'
sshd_HostbasedAuthentication: 'no'
sshd_PasswordAuthentication: 'no'
sshd_PermitEmptyPasswords: 'no'
sshd_ChallengeResponseAuthentication: 'no'
sshd_GSSAPIAuthentication: 'no'
sshd_X11DisplayOffset: 10
sshd_PrintMotd: 'yes'
sshd_PrintLastLog: 'yes'
sshd_TCPKeepAlive: 'yes'
sshd_Subsystem: "sftp /usr/lib/openssh/sftp-server"
sshd_UsePAM: 'yes'
sshd_UseDNS: 'no'
sshd_KexAlgorithms: "curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256"
sshd_Ciphers: "chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr"
sshd_MACs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com"
sshd_HostKey:
  - /etc/ssh/ssh_host_rsa_key
#######
# APT #
#######
apt_upgrade: true
apt_repositories: []
apt_ppas: []
# # nginx ppa repo
# - repo: ppa:nginx/stable
#   # not needed on ubuntu distribution
#   #codename: trusty
apt_packages:
  - name: openssh-server

14
host_vars/ovh_fr.yml Normal file
View File

@@ -0,0 +1,14 @@
---
# APT sources for the OVH host (Ubuntu focal); consumed by the
# apt_repository pre_task in all.yml via apt_repositories_sources.
# apt_repositories:
# - http://nova.clouds.archive.ubuntu.com
apt_repositories_sources:
- deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal main restricted
- deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-updates main restricted
- deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal universe
- deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-updates universe
- deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal multiverse
- deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-updates multiverse
- deb http://nova.clouds.archive.ubuntu.com/ubuntu/ focal-backports main restricted universe multiverse
- deb http://security.ubuntu.com/ubuntu focal-security main restricted
- deb http://security.ubuntu.com/ubuntu focal-security universe
- deb http://security.ubuntu.com/ubuntu focal-security multiverse

View File

@@ -0,0 +1,3 @@
---
# Extra APT mirror for the Scaleway host (claranet.apt role variable).
apt_repositories:
- http://mirrors.online.net

16
host_vars/vagrant.yml Normal file
View File

@@ -0,0 +1,16 @@
---
# Local Vagrant box: kubernetes lab via geerlingguy.kubernetes.
docker_install_compose: false
# Quoted: an unquoted 1.28 is a YAML float (and e.g. 1.30 would collapse to 1.3).
kubernetes_version: "1.28"
kubernetes_apiserver_advertise_address: 192.168.33.10
kubernetes_pod_network:
  # Weave CNI (not flannel; flannel's default CIDR is reused here).
  cni: 'weave'
  cidr: '10.244.0.0/16'
# Drives the systemd-cgroup containerd config in kube.yml / containerd.toml.j2.
containerd_config_systemd: true
# kubernetes_ignore_preflight_errors: null
# kubernetes_kubeadm_init_extra_opts:
#   - "--pod-network-cidr=10.244.0.0/16"
#   - "--control-plane-endpoint=192.168.33.10"

12
hosts Normal file
View File

@@ -0,0 +1,12 @@
# Static INI inventory; hosts intentionally appear in several groups.
[perso]
ovh_fr ansible_host=37.187.127.90 ansible_user=stephane
scaleway_fr ansible_host=163.172.84.28 ansible_user=stephane
[ovh]
ovh_fr ansible_host=37.187.127.90 ansible_user=stephane
[scaleway]
scaleway_fr ansible_host=163.172.84.28 ansible_user=stephane
[local]
# NOTE(review): plaintext password is the well-known Vagrant default --
# acceptable only for a throwaway local box, never for real hosts.
vagrant ansible_host=192.168.33.10 ansible_user=vagrant ansible_password=vagrant

61
kube.yml Normal file
View File

@@ -0,0 +1,61 @@
---
# Kubernetes playbook: docker runtime, containerd config, then kubeadm via role.
- name: Main playbook for Kubernetes
  hosts: all
  become: true
  # @author Stéphane Gratias (2023).
  pre_tasks:
    - name: >-
        Show target servers -> Hostname : OS - IP
      ansible.builtin.debug:
        msg: "{{ ansible_hostname }} : {{ ansible_distribution }} {{ ansible_distribution_version }} - {{ ansible_default_ipv4.address }}"
      tags:
        - test
  # Preparer le /etc/containerd/config.toml
  #   version = 2
  #   [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  #     SystemdCgroup = true
  # when kubernetes_config_kubelet_configuration.cgroupDriver: "systemd"
  tasks:
    - name: Install the Docker / containerd runtime
      ansible.builtin.import_role:
        name: geerlingguy.docker
      tags:
        - kubernetes
        - docker
    - name: Create containerd config for Kubernetes
      ansible.builtin.template:
        src: "../templates/containerd.toml.j2"
        dest: "/etc/containerd/config.toml"
        owner: root
        group: root
        mode: "0644"
      tags:
        - kubernetes
        - test
      when:
        - containerd_config_systemd is true
        # BUGFIX: `is "systemd"` is invalid Jinja (`is` expects a test name,
        # not a string literal) and aborts the play; equality needs `==`.
        - kubernetes_config_kubelet_configuration.cgroupDriver == "systemd"
      notify: restart containerd
    - name: Install Kubernetes
      ansible.builtin.import_role:
        name: geerlingguy.kubernetes
      tags:
        - kubernetes
  handlers:
    - name: restart containerd
      ansible.builtin.service:
        name: containerd
        state: restarted

27
requirements.yml Normal file
View File

@@ -0,0 +1,27 @@
# Galaxy roles used by the playbooks in this repo.
# Install with: ansible-galaxy install -r requirements.yml
# USER
- src: grog.management-user
- src: GROG.user
- src: GROG.authorized-key
- src: GROG.sudo
# DOCKER
- src: geerlingguy.docker
- src: geerlingguy.kubernetes
- src: geerlingguy.pip
- src: tumf.systemd-service
# SSH client side
- src: linux-system-roles.ssh
# SSH server side
- src: willshersystems.sshd
# PACKAGE
- src: claranet.apt
# IPTABLES
- src: geerlingguy.firewall
# LOG ROTATE
- src: nickhammond.logrotate
- src: ome.logrotate
# FAIL2BAN
- src: robertdebock.fail2ban
# BACKUP
- src: ome.rsync_server
- src: ome.selinux_utils

View File

@@ -0,0 +1,26 @@
# {{ ansible_managed }}
{% if containerd_config_disabled_plugins is defined %}
{# BUGFIX: join('","') gives each list element its own quoted TOML string;
   the previous join(',') collapsed the whole list into one "a,b" element. #}
disabled_plugins = ["{{ containerd_config_disabled_plugins | join('","') }}"]
{% endif %}
{% if containerd_config_systemd is true %}
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true
{% endif %}
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"

View File

@@ -0,0 +1,2 @@
# {{ ansible_managed }}
{# Render the whole dockerapp_compose group var (group_vars/local.yml) as the
   docker-compose.yml deployed by docker.yml. #}
{{ dockerapp_compose | to_nice_yaml(indent=2) }}